ANALYSIS AND PREDICTION OF CHURN

IMPORTING THE NECESSARY LIBRARIES
In [120]:
import pandas as pd 
import numpy as np

import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib.gridspec as gridspec
import seaborn as sns

sns.set_style('darkgrid')
from sklearn.metrics import mean_squared_error
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
import os

import plotly.graph_objs as go 
from plotly.offline import init_notebook_mode, iplot
import plotly.express as px

from collections import Counter

from sklearn.metrics import accuracy_score,mean_squared_error,roc_curve,roc_auc_score,classification_report,r2_score,confusion_matrix
#train test split, Grid Search CV
from sklearn.model_selection import train_test_split,cross_val_score,ShuffleSplit,GridSearchCV

from sklearn.model_selection import train_test_split
#for one hot encoding with sklearn
from sklearn.preprocessing import OneHotEncoder
#for one hot encoding with feature-engine
from feature_engine.categorical_encoders import OneHotCategoricalEncoder
In [2]:
churn = pd.read_csv("Churn_Modelling.csv")
churn.head()
Out[2]:
RowNumber CustomerId Surname CreditScore Geography Gender Age Tenure Balance NumOfProducts HasCrCard IsActiveMember EstimatedSalary Exited
0 1 15634602 Hargrave 619 France Female 42 2 0.00 1 1 1 101348.88 1
1 2 15647311 Hill 608 Spain Female 41 1 83807.86 1 0 1 112542.58 0
2 3 15619304 Onio 502 France Female 42 8 159660.80 3 1 0 113931.57 1
3 4 15701354 Boni 699 France Female 39 1 0.00 2 0 0 93826.63 0
4 5 15737888 Mitchell 850 Spain Female 43 2 125510.82 1 1 1 79084.10 0
In [3]:
churn.tail()
Out[3]:
RowNumber CustomerId Surname CreditScore Geography Gender Age Tenure Balance NumOfProducts HasCrCard IsActiveMember EstimatedSalary Exited
9995 9996 15606229 Obijiaku 771 France Male 39 5 0.00 2 1 0 96270.64 0
9996 9997 15569892 Johnstone 516 France Male 35 10 57369.61 1 1 1 101699.77 0
9997 9998 15584532 Liu 709 France Female 36 7 0.00 1 0 1 42085.58 1
9998 9999 15682355 Sabbatini 772 Germany Male 42 3 75075.31 2 1 0 92888.52 1
9999 10000 15628319 Walker 792 France Female 28 4 130142.79 1 1 0 38190.78 0
In [44]:
churn = churn.copy()
churn.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 10000 entries, 0 to 9999
Data columns (total 14 columns):
 #   Column           Non-Null Count  Dtype  
---  ------           --------------  -----  
 0   RowNumber        10000 non-null  int64  
 1   CustomerId       10000 non-null  int64  
 2   Surname          10000 non-null  object 
 3   CreditScore      10000 non-null  int64  
 4   Geography        10000 non-null  object 
 5   Gender           10000 non-null  object 
 6   Age              10000 non-null  int64  
 7   Tenure           10000 non-null  int64  
 8   Balance          10000 non-null  float64
 9   NumOfProducts    10000 non-null  int64  
 10  HasCrCard        10000 non-null  int64  
 11  IsActiveMember   10000 non-null  int64  
 12  EstimatedSalary  10000 non-null  float64
 13  Exited           10000 non-null  int64  
dtypes: float64(2), int64(9), object(3)
memory usage: 1.1+ MB

PERFORMING EDA

In [4]:
churn.dtypes
Out[4]:
RowNumber            int64
CustomerId           int64
Surname             object
CreditScore          int64
Geography           object
Gender              object
Age                  int64
Tenure               int64
Balance            float64
NumOfProducts        int64
HasCrCard            int64
IsActiveMember       int64
EstimatedSalary    float64
Exited               int64
dtype: object
  • The dataset comprises of both numerical and catagorical variables/features
In [5]:
churn.isna().sum()
Out[5]:
RowNumber          0
CustomerId         0
Surname            0
CreditScore        0
Geography          0
Gender             0
Age                0
Tenure             0
Balance            0
NumOfProducts      0
HasCrCard          0
IsActiveMember     0
EstimatedSalary    0
Exited             0
dtype: int64
  • The dataset has no missing values
In [ ]:
 
In [7]:
# Removing the first three columns from the data set because of their irrelvance'

# churn = churn.drop(["RowNumber", "CustomerId", "Surname"], axis=1)
# churn.head(5)
After dropping the three variables from the dataset, i am now left with 11 columns

Statistical Summary

In [7]:
churn['Exited'].value_counts(normalize=True) # counting the number of customers that churn and those that didn't churn
Out[7]:
0    0.7963
1    0.2037
Name: Exited, dtype: float64
In [8]:
# Knowing the % of those that have exited and those that have not exited

print(sum(churn["Exited"]) / len(churn["Exited"]))
print(1 - sum(churn["Exited"]) / len(churn["Exited"]))
0.2037
0.7963

It shows that 0.2037% of customers as exited

In [45]:
churn.drop(["RowNumber", "CustomerId"], axis=1).describe().T
Out[45]:
count mean std min 25% 50% 75% max
CreditScore 10000.0 650.528800 96.653299 350.00 584.00 652.000 718.0000 850.00
Age 10000.0 38.921800 10.487806 18.00 32.00 37.000 44.0000 92.00
Tenure 10000.0 5.012800 2.892174 0.00 3.00 5.000 7.0000 10.00
Balance 10000.0 76485.889288 62397.405202 0.00 0.00 97198.540 127644.2400 250898.09
NumOfProducts 10000.0 1.530200 0.581654 1.00 1.00 1.000 2.0000 4.00
HasCrCard 10000.0 0.705500 0.455840 0.00 0.00 1.000 1.0000 1.00
IsActiveMember 10000.0 0.515100 0.499797 0.00 0.00 1.000 1.0000 1.00
EstimatedSalary 10000.0 100090.239881 57510.492818 11.58 51002.11 100193.915 149388.2475 199992.48
Exited 10000.0 0.203700 0.402769 0.00 0.00 0.000 0.0000 1.00
In [ ]:
 
In [ ]:
 
In [50]:
# Checking for correlation matrix 
churn.corr()
Out[50]:
RowNumber CustomerId CreditScore Age Tenure Balance NumOfProducts HasCrCard IsActiveMember EstimatedSalary Exited
RowNumber 1.000000 0.004202 0.005840 0.000783 -0.006495 -0.009067 0.007246 0.000599 0.012044 -0.005988 -0.016571
CustomerId 0.004202 1.000000 0.005308 0.009497 -0.014883 -0.012419 0.016972 -0.014025 0.001665 0.015271 -0.006248
CreditScore 0.005840 0.005308 1.000000 -0.003965 0.000842 0.006268 0.012238 -0.005458 0.025651 -0.001384 -0.027094
Age 0.000783 0.009497 -0.003965 1.000000 -0.009997 0.028308 -0.030680 -0.011721 0.085472 -0.007201 0.285323
Tenure -0.006495 -0.014883 0.000842 -0.009997 1.000000 -0.012254 0.013444 0.022583 -0.028362 0.007784 -0.014001
Balance -0.009067 -0.012419 0.006268 0.028308 -0.012254 1.000000 -0.304180 -0.014858 -0.010084 0.012797 0.118533
NumOfProducts 0.007246 0.016972 0.012238 -0.030680 0.013444 -0.304180 1.000000 0.003183 0.009612 0.014204 -0.047820
HasCrCard 0.000599 -0.014025 -0.005458 -0.011721 0.022583 -0.014858 0.003183 1.000000 -0.011866 -0.009933 -0.007138
IsActiveMember 0.012044 0.001665 0.025651 0.085472 -0.028362 -0.010084 0.009612 -0.011866 1.000000 -0.011421 -0.156128
EstimatedSalary -0.005988 0.015271 -0.001384 -0.007201 0.007784 0.012797 0.014204 -0.009933 -0.011421 1.000000 0.012097
Exited -0.016571 -0.006248 -0.027094 0.285323 -0.014001 0.118533 -0.047820 -0.007138 -0.156128 0.012097 1.000000
  • From the analysis above, it shows that the variables are not coorrelated.
In [53]:
plt.style.use("ggplot")
f,ax=plt.subplots(figsize = (15, 10))
sns.heatmap(churn.drop(["RowNumber", "CustomerId"], axis=1).corr(),
            robust=True, fmt=' .1g',
            linewidths=1.3,
            linecolor='green',
            annot=True,);

Visualizing the Dataset

  • Countries with the less and the most common use of credit cards
  • The country with the highest salary
  • Visualization of credit card’s usage according to ages
  • Rates of credit card’s usage according to gender
  • The country with the highest credit score
  • The gender with the highest credit score
  • The range of age with the highest credit score
  • Customers in which age range work longer with their banks
  • Customers leaving the bank
  • Female – Male rates
  • Age distribution
  • Salary distribution
In [11]:
# Visualizing the count of 'exited customers' in the dataset
plt.figure(figsize=(7,8))
sns.countplot(x='Exited', data=churn)
plt.xlabel('0: Customers still with the bank, 1: Customers exited the bank')
plt.ylabel('Count')
plt.title('Customers Churn Visualization')
plt.show()
In [54]:
plt.figure(figsize=(10,8))
sns.countplot(x="HasCrCard",
              hue = "Geography",
              data = churn, palette="husl");

print(churn.groupby('Geography')['HasCrCard'].sum())
Geography
France     3543
Germany    1791
Spain      1721
Name: HasCrCard, dtype: int64
In [56]:
plt.figure(figsize=(10,8))
sns.countplot(x="HasCrCard",
              hue = "Exited",
              data = churn);

print(churn.groupby('Geography')['HasCrCard'].sum())
Geography
France     3543
Germany    1791
Spain      1721
Name: HasCrCard, dtype: int64
In [57]:
fig = px.box(churn, x="Geography", y = "EstimatedSalary",color = 'Exited'); # Another visualization about salary effect
fig.update_layout(title_text="The country with the mean salary-With Outliers(Exited-Not Exited groups)")
fig.show();
In [62]:
plt.figure(figsize = (20,8)) 
plt.xticks(rotation=90)
plt.title('Credit Card Usage for Ages',color = 'blue',fontsize=15)
sns.countplot(x=churn["Age"],hue = 'HasCrCard',data=churn);
plt.xlabel('Ages')
plt.ylabel('Number of Credit Card Users');
In [63]:
fig = px.box(churn, x="HasCrCard", y = "Age", color= "Exited");
fig.update_layout(title_text = "Credit Card Usage & Age - With Outliers(Exited-Not Exited groups")
fig.show();
Credit card’s usage according to gender
In [66]:
fig = px.parallel_categories(churn, dimensions=['Gender', 'Geography', 'Exited'],
                color="Exited",
                color_continuous_scale=px.colors.sequential.Inferno,
                labels={'Gender':'Gender(Female,Male)', 'Exited':'Exited(0:No,1:Yes)'})
fig.update_layout(title_text="Gender-Geography-Exited-Not Exited Schema")
fig.show();
In [68]:
fig = px.parallel_categories(churn, dimensions=['Gender','HasCrCard',"IsActiveMember", 'Exited'],
                color="Exited", color_continuous_scale=px.colors.sequential.Inferno,
                labels={'HasCrCard':'Has Credit Card', 'Gender':'Gender(Female,Male)', 'Exited':'Exited(0:No,1:Yes)'})
fig.update_layout(title_text="Credit Card-Gender-Exited-Not Exited Schema")
fig.show(); 
Highest credit score base on Geography
In [69]:
print(churn.groupby("Geography")["CreditScore"].mean())
fig = px.box(churn, x="Geography", y = "CreditScore",color = 'Exited');
fig.update_layout(title_text="The country with the highest credit score(mean)-With Outliers(Exited-Not Exited groups)")
fig.show();
Geography
France     649.668329
Germany    651.453567
Spain      651.333872
Name: CreditScore, dtype: float64
In [ ]:
plt.figure(figsize = (14,8));
sns.catplot(x='Geography',
            y = "CreditScore",
            hue="Exited",
            col="Gender",
            aspect=1.2,height=5,
            kind="swarm", data=df);
Age group with the highest credit score
In [73]:
plt.figure(figsize = (16,6)) 
plt.xticks(rotation=45)
sns.scatterplot(x=churn['Age'],y = churn["CreditScore"],hue = "Gender", data=churn);
In [72]:
plt.figure(figsize = (16,6)) 
plt.xticks(rotation=75)
sns.scatterplot(x=churn['Age'],y = churn["CreditScore"],hue = "Exited", data=churn);
In [77]:
plt.figure(figsize = (20,8)) 
plt.xticks(rotation=45)
sns.countplot(x=churn["Age"],hue = 'Exited',data=churn, palette="husl");
plt.xlabel('Age')
plt.ylabel('Number of customers (Exited or not)');
In [78]:
below_30 = churn[churn["Age"]<30]
between_30_40 = churn[(churn["Age"]>=30) & (churn["Age"]<40)]
between_40_50 = churn[(churn["Age"]>=40) & (churn["Age"]<50)]
between_50_60 = churn[(churn["Age"]>=50) & (churn["Age"]<60)]
between_60_70 = churn[(churn["Age"]>=60) & (churn["Age"]<70)]
above_70 = churn[(churn["Age"]>=70)]



k = below_30["Exited"].sum()
l = between_30_40["Exited"].sum()
m = between_40_50["Exited"].sum()
n = between_50_60["Exited"].sum()
o = between_60_70["Exited"].sum()
p = above_70["Exited"].sum()
In [80]:
f,ax = plt.subplots(figsize=(15, 15))
plt.subplot(6,1,1)
sns.countplot(x=below_30["Age"],hue = 'Exited',data=churn, palette="husl");
plt.xlabel('Age')
plt.ylabel('Customers (Exited)');
plt.xticks(rotation= 30)

plt.subplot(6,1,2)
sns.countplot(x=between_30_40["Age"],hue = 'Exited',data=churn, palette="husl");
plt.xlabel('Age')
plt.ylabel('Customers (Exited)');
plt.xticks(rotation= 30)


plt.subplot(6,1,3)
sns.countplot(x=between_40_50["Age"],hue = 'Exited',data=churn, palette="husl");
plt.xlabel('Age')
plt.ylabel('Customers (Exited)');
plt.xticks(rotation= 30);

plt.subplot(6,1,4)
sns.countplot(x=between_50_60["Age"],hue = 'Exited',data=churn, palette="husl");
plt.xlabel('Age')
plt.ylabel('Customers (Exited)');
plt.xticks(rotation= 30);

plt.subplot(6,1,5)
sns.countplot(x=between_60_70["Age"],hue = 'Exited',data=churn, palette="husl");
plt.xlabel('Age')
plt.ylabel('Customers (Exited)');
plt.xticks(rotation= 30);

plt.subplot(6,1,6)
sns.countplot(x=above_70["Age"],hue = 'Exited',data=churn, palette="husl");
plt.xlabel('Age')
plt.ylabel('Customers (Exited)');
plt.xticks(rotation= 30);
Customers leaving the bank
  • Female – Male rates
  • Age distribution
  • Salary distribution
In [82]:
plt.figure(figsize = (10,6)) 
plt.xticks(rotation=45)
sns.barplot(x=churn['Geography'],y = churn["Exited"],hue = "Gender",data=churn, palette="husl");
plt.ylabel('Percetage of people (Exited %)');
In [83]:
plt.figure(figsize = (9,6)) 
plt.xticks(rotation=45)
sns.countplot(x=churn["Geography"],hue = 'Exited',data=churn, palette="husl");
plt.xlabel('Geo')
plt.ylabel('Number of customers (Exited or not)');
In [84]:
print("Total Number of People By Geography\n",churn["Geography"].value_counts())
print("Number of People Exited By Geography\n",churn[churn['Exited']==1]["Geography"].value_counts(),'\n')

print("Number of People Exited By Gender in Germany \n",churn[(churn['Exited']==1)&(churn['Geography']=='Germany')]["Gender"].value_counts())
print("Number of People Exited By Gender in France \n",churn[(churn['Exited']==1)&(churn['Geography']=='France')]["Gender"].value_counts())
print("Number of People Exited By Gender in Spain \n",churn[(churn['Exited']==1)&(churn['Geography']=='Spain')]["Gender"].value_counts())
Total Number of People By Geography
 France     5014
Germany    2509
Spain      2477
Name: Geography, dtype: int64
Number of People Exited By Geography
 Germany    814
France     810
Spain      413
Name: Geography, dtype: int64 

Number of People Exited By Gender in Germany 
 Female    448
Male      366
Name: Gender, dtype: int64
Number of People Exited By Gender in France 
 Female    460
Male      350
Name: Gender, dtype: int64
Number of People Exited By Gender in Spain 
 Female    231
Male      182
Name: Gender, dtype: int64
Plotting of salary distribution
In [87]:
plt.figure(figsize = (10,6)) 
plt.xticks(rotation=75)
sns.scatterplot(x='Age',y = "EstimatedSalary",hue = "Exited",data=churn);

From the plot above, it shows that salary has no effect in exit decision

In [88]:
plt.figure(figsize = (10,6)) 
plt.xticks(rotation=75)
sns.scatterplot(x='Age',y = "Balance",hue = "Exited",data=churn);
Performing featuring Engineering
In [90]:
churn = churn.copy()
Age

At this stage, i will create new age groups that will help for perdiction score.

In [93]:
age_group_data = [None] * len(churn['Age'])
for i in range(len(churn['Age'])):
    if churn['Age'][i] < 30:
        age_group_data[i] = 'Young'
    elif churn['Age'][i] >=30 and churn['Age'][i] < 40:
        age_group_data[i] = 'Young-Adults'
    elif churn['Age'][i] >=40 and churn['Age'][i] < 50:
        age_group_data[i] = 'Adults'
    elif churn['Age'][i] >=50 and churn['Age'][i] < 60:
        age_group_data[i] = 'Elderly-Adults'
    elif churn['Age'][i] >=60 and churn['Age'][i] < 74:
        age_group_data[i] = 'Old'
    else:
        age_group_data[i] = 'Very-Old'

churn['age_group'] = age_group_data
Credit Score

I will create a boundary line for credit score

In [96]:
Credit = [None] * len(churn['CreditScore'])
for i in range(len(churn['CreditScore'])):
    if churn['CreditScore'][i] < 405:
        Credit[i] = 0
    else:
        Credit[i] = 1
        
churn['new_credit'] = Credit
In [97]:
churn["new_credit"].value_counts()
Out[97]:
1    9979
0      21
Name: new_credit, dtype: int64
In [100]:
sns.factorplot(x = "new_credit", y = "Exited", data = churn ,kind = "bar")
plt.xticks(rotation=75)
plt.ylabel("Exited(Precent)");
In [101]:
g = sns.factorplot(x = "age_group", y = "Exited", data = churn, kind = "bar")
plt.xticks(rotation=45)
g.set_ylabels("Exited")
plt.show()
Building ML Models
  • Interpretation of variables
  • Preparing variables for model building

1 Building dependent and undependent variables 2 Determining Train and Test sets

Building a model

1 Trying all models of the learning algorithms 2 Setting the parameters 3 Determining best parameters 4 Doing Cross validation 5 Finding Acury score 6 The most effective variables will be determined in each model

Comparing the models

1 Visualization of all models’ acury score 2 The model that gives the best results will be determined 3 Research will be done on the mathematical algorithm that creates the best model.

Creation of Dummy Variables
In [102]:
gender_dummies = churn.replace(to_replace={'Gender': {'Female': 0,'Male':1}})
a = pd.get_dummies(churn['Geography'], prefix = "Geo_dummy")
c = pd.get_dummies(churn['age_group'], prefix = "Age_dummy")
In [103]:
frames = [gender_dummies,a,c]  
churn = pd.concat(frames, axis = 1)
churn = churn.drop(["RowNumber","Geography","Surname","CustomerId",'Age','age_group','Geography',"CreditScore"],axis = 1)
churn.head()
Out[103]:
Gender Tenure Balance NumOfProducts HasCrCard IsActiveMember EstimatedSalary Exited new_credit Geo_dummy_France Geo_dummy_Germany Geo_dummy_Spain Age_dummy_Adults Age_dummy_Elderly-Adults Age_dummy_Old Age_dummy_Very-Old Age_dummy_Young Age_dummy_Young-Adults
0 0 2 0.00 1 1 1 101348.88 1 1 1 0 0 1 0 0 0 0 0
1 0 1 83807.86 1 0 1 112542.58 0 1 0 0 1 1 0 0 0 0 0
2 0 8 159660.80 3 1 0 113931.57 1 1 1 0 0 1 0 0 0 0 0
3 0 1 0.00 2 0 0 93826.63 0 1 1 0 0 0 0 0 0 0 1
4 0 2 125510.82 1 1 1 79084.10 0 1 0 0 1 1 0 0 0 0 0

Normalizing Variables

In [104]:
x = churn.drop(["Exited"],axis = 1) #Independent value
y = churn["Exited"] #Depended value 
In [105]:
# data normalization with sklearn
from sklearn.preprocessing import MinMaxScaler

# fit scaler on training data
norm = MinMaxScaler().fit(x)

# transform independent data
x_norm = norm.transform(x)
In [106]:
x_train, x_test,y_train,y_test = train_test_split(x_norm,y,test_size = 0.3, random_state = 42)

Logistic Regression

In [116]:
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score
log_reg = LogisticRegression().fit(x_train,y_train)
y_pred = log_reg.predict(x_test)
log_model = (accuracy_score(y_test,y_pred)*100)
log_model
Out[116]:
83.7
In [117]:
y_probs = log_reg.predict_proba(x_test)[:,1]
y_pred = [1 if i >0.53 else 0 for i in y_probs]
log_proba_score = (accuracy_score(y_test,y_pred)*100)
print ("log score=",log_proba_score)
log score= 83.6
In [118]:
confusion_matrix(y_test,y_pred)
Out[118]:
array([[2347,   69],
       [ 423,  161]], dtype=int64)

Tuning

In [121]:
log_params = {"C":np.logspace(-3,3,7),
              "penalty": ["l1","l2"],
              "max_iter":[10,50,500,1000]} #"solver":['lbfgs', 'liblinear', 'sag', 'saga'],
log =LogisticRegression()
log_cv = GridSearchCV(log,log_params,cv = 10)

log_tuned = log_cv.fit(x_train,y_train)
log_tuned.best_params_
Out[121]:
{'C': 1.0, 'max_iter': 50, 'penalty': 'l2'}
In [122]:
log_reg_tuned = LogisticRegression(C=100,max_iter=50,penalty='l2',solver='liblinear').fit(x_train,y_train)
y_probs = log_reg.predict_proba(x_test)[:,1]
y_pred = [1 if i >0.53 else 0 for i in y_probs]
In [123]:
log_tuned_score = (accuracy_score(y_test,y_pred)*100)
print ("log tuned score=",log_tuned_score)
log tuned score= 83.6

Confusion Matrix

In [124]:
lr_cm = confusion_matrix(y_test,y_pred)
lr_cm
Out[124]:
array([[2347,   69],
       [ 423,  161]], dtype=int64)
In [ ]:
 

Naive Bayes

Gaussian NB

In [125]:
from sklearn.naive_bayes import GaussianNB
nb = GaussianNB()
gnb_model = nb.fit(x_train,y_train)
gnb_model
Out[125]:
GaussianNB()
In [128]:
y_pred = gnb_model.predict(x_test)
nb_score = (accuracy_score(y_test,y_pred)*100)
nb_score
Out[128]:
82.39999999999999

Tuning

In [129]:
nb_params = {'var_smoothing': np.logspace(0,-9, num=100)}

nb =GaussianNB()
nb_cv = GridSearchCV(nb,nb_params,cv = 10)

nb_cv = nb_cv.fit(x_train,y_train)
nb_cv.best_params_
Out[129]:
{'var_smoothing': 0.43287612810830584}
In [130]:
nb_tuned =GaussianNB(var_smoothing=0.43287612810830584).fit(x_train,y_train)
y_pred = nb_tuned.predict(x_test)
nb_tuned = (accuracy_score(y_test,y_pred)*100)
nb_tuned
Out[130]:
83.66666666666667

Confusion Matrix

In [131]:
nb_cm = confusion_matrix(y_test,y_pred)
nb_cm
Out[131]:
array([[2315,  101],
       [ 389,  195]], dtype=int64)

KNN

In [132]:
from sklearn.neighbors import KNeighborsClassifier
knn =KNeighborsClassifier()
knn_model = knn.fit(x_train,y_train)
knn_model
Out[132]:
KNeighborsClassifier()
In [133]:
y_pred = knn_model.predict(x_test)
knn_score = (accuracy_score(y_test,y_pred)*100)
knn_score
Out[133]:
82.83333333333334

Model Tuning

In [134]:
knn_params = {"n_neighbors":np.arange(1,50),
              "weights": ["uniform","distance"],
              "metric":["euclidean","manhattan"]}
In [135]:
knn =KNeighborsClassifier()
knn_cv = GridSearchCV(knn,knn_params,cv = 10)
knn_cv = knn_cv.fit(x_train,y_train)
In [136]:
print("Best Parameters:"+str(knn_cv.best_params_))
Best Parameters:{'metric': 'manhattan', 'n_neighbors': 8, 'weights': 'uniform'}
In [137]:
knn_final =KNeighborsClassifier(n_neighbors =15,metric='manhattan',weights='distance')
knn_final = knn_final.fit(x_train,y_train)
y_pred = knn_final.predict(x_test)
knn_tuned = (accuracy_score(y_test,y_pred)*100)
knn_tuned
Out[137]:
83.93333333333334

Confusion Matrix

In [138]:
knn_cm = confusion_matrix(y_test,y_pred)
knn_cm
Out[138]:
array([[2309,  107],
       [ 375,  209]], dtype=int64)
SVM (Support Vector Machines)
In [139]:
from sklearn.svm import SVC
In [140]:
svm_model_linear = SVC(kernel='linear').fit(x_train,y_train)
svm_model_poly = SVC(kernel='poly').fit(x_train,y_train)
svm_model_rbf = SVC(kernel='rbf').fit(x_train,y_train)
In [141]:
y_pred_linear = svm_model_linear.predict(x_test)
y_pred_poly = svm_model_poly.predict(x_test)
y_pred_rbf = svm_model_rbf.predict(x_test)
In [142]:
print(accuracy_score(y_test,y_pred_linear)*100)
print(accuracy_score(y_test,y_pred_poly)*100)
print(accuracy_score(y_test,y_pred_rbf)*100)
81.86666666666666
85.46666666666667
84.63333333333334
Model Tuning

Polinomal kernel(poly)

In [143]:
svc_params = {"C": [1,5,10,50,100,200],
              'kernel':['poly','rbf'],
              "gamma": [0.001, 0.01, 0.1,0.5],}
                 
svc = SVC()
svc_cv_model = GridSearchCV(svc,svc_params,
                            cv = 5,
                           n_jobs = -1,
                           verbose = 2)
svc_cv_model.fit(x_train,y_train)
print("Best Parameters:"+str(svc_cv_model.best_params_))
Fitting 5 folds for each of 48 candidates, totalling 240 fits
[Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.
[Parallel(n_jobs=-1)]: Done  25 tasks      | elapsed:    9.5s
[Parallel(n_jobs=-1)]: Done 146 tasks      | elapsed:  1.2min
[Parallel(n_jobs=-1)]: Done 240 out of 240 | elapsed: 13.1min finished
Best Parameters:{'C': 50, 'gamma': 0.5, 'kernel': 'poly'}
In [146]:
svc_tuned = SVC(kernel = "poly",C=50,gamma=0.5).fit(x_train,y_train)
In [147]:
y_pred = svc_tuned.predict(x_test)
svc_tuned_score = (accuracy_score(y_test,y_pred)*100)
svc_tuned_score
Out[147]:
86.73333333333333
In [148]:
confusion_matrix(y_test,y_pred)
Out[148]:
array([[2312,  104],
       [ 294,  290]], dtype=int64)
In [144]:
##### Radial basis function kernel(rbf)
In [150]:
svc_rbf_tuned = SVC(kernel = "rbf",C=50,gamma=0.1).fit(x_train,y_train)
y_pred = svc_rbf_tuned.predict(x_test)
In [151]:
svc_rbf_score = (accuracy_score(y_test,y_pred)*100)
svc_rbf_score
Out[151]:
86.03333333333333
Confusion Matrix
In [153]:
svm_cm = confusion_matrix(y_test,y_pred)
svm_cm
Out[153]:
array([[2338,   78],
       [ 341,  243]], dtype=int64)
Random Forest
In [154]:
from sklearn.ensemble import RandomForestClassifier
r_for = RandomForestClassifier().fit(x_train,y_train)
r_for
Out[154]:
RandomForestClassifier()
In [155]:
y_pred = r_for.predict(x_test)
rf_score = accuracy_score(y_test,y_pred)*100
rf_score
Out[155]:
85.53333333333333
Model Tuning
In [156]:
rf_params  = {'max_depth':list(range(1,10)),
             "max_features":["log2","auto","sqrt"],
             "n_estimators":[2,10,20,50,150,300],
             'criterion' : ['gini','entropy'],
             'min_samples_leaf' : [1,3,5,10]}
In [157]:
rf_model = RandomForestClassifier()
In [158]:
rf_cv_model = GridSearchCV(rf_model,
                           rf_params,
                           cv = 5,
                           n_jobs = -1)
In [159]:
rf_cv_model.fit(x_train,y_train)
rf_cv_model.best_params_
Out[159]:
{'criterion': 'entropy',
 'max_depth': 9,
 'max_features': 'sqrt',
 'min_samples_leaf': 1,
 'n_estimators': 50}
In [160]:
rf_tuned = RandomForestClassifier(max_depth = 10,
                                  criterion = 'gini',
                                  max_features = 'log2',
                                  min_samples_leaf = 1,
                                  n_estimators = 150,random_state=42)
rf_tuned = rf_tuned.fit(x_train,y_train)
y_pred  = rf_tuned.predict(x_test)
rf_tuned_score = (accuracy_score(y_test,y_pred)*100)
rf_tuned_score
Out[160]:
86.7
Confusion Matrix
In [161]:
rf_cm = confusion_matrix(y_test,y_pred)
rf_cm
Out[161]:
array([[2347,   69],
       [ 330,  254]], dtype=int64)
Gradient Boosting Machines (GBM)
In [163]:
from sklearn.ensemble import GradientBoostingClassifier
gbm = GradientBoostingClassifier()
gbm_model = gbm.fit(x_train,y_train) 
gbm_model
Out[163]:
GradientBoostingClassifier()
In [164]:
y_pred = gbm_model.predict(x_test)
gbm_score = accuracy_score(y_test,y_pred)*100
gbm_score
Out[164]:
86.73333333333333
Tuning
In [165]:
gbm_params = {"learning_rate" : [0.001, 0.01, 0.1, 0.2],
             "n_estimators": [100,200,300,500,1000],
             "max_depth": [1,3,5,10],
             "min_samples_split": [1,2,5,10]}
gbm = GradientBoostingClassifier()
clf = GridSearchCV(gbm,gbm_params,verbose=0,n_jobs=-1,cv=3)
gb = clf.fit(x_train,y_train)
gb.best_params_ 
Out[165]:
{'learning_rate': 0.1,
 'max_depth': 3,
 'min_samples_split': 5,
 'n_estimators': 100}
In [166]:
gbm = GradientBoostingClassifier(n_estimators=100,min_samples_split=5,max_depth=3,learning_rate=0.2,random_state=42)
gbm.fit(x_train,y_train)
y_pred = gbm.predict(x_test)
gbm_tuned_score = accuracy_score(y_test,y_pred)*100
gbm_tuned_score
Out[166]:
86.76666666666667
Confusion Matrix
In [168]:
gbm_cm = confusion_matrix(y_test,y_pred)
gbm_cm
Out[168]:
array([[2330,   86],
       [ 311,  273]], dtype=int64)
XGBoost
In [169]:
from xgboost import XGBClassifier
xgb = XGBClassifier(n_estimators=100)
xgb_model = xgb.fit(x_train,y_train) 
xgb_model
Out[169]:
XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
              colsample_bynode=1, colsample_bytree=1, gamma=0, gpu_id=-1,
              importance_type='gain', interaction_constraints='',
              learning_rate=0.300000012, max_delta_step=0, max_depth=6,
              min_child_weight=1, missing=nan, monotone_constraints='()',
              n_estimators=100, n_jobs=0, num_parallel_tree=1, random_state=0,
              reg_alpha=0, reg_lambda=1, scale_pos_weight=1, subsample=1,
              tree_method='exact', validate_parameters=1, verbosity=None)
In [170]:
y_pred = xgb_model.predict(x_test)
xgb_score = accuracy_score(y_test,y_pred)*100
xgb_score
Out[170]:
86.06666666666666
Tuning
In [171]:
xgb_params ={
        'n_estimators': [50, 100, 200],
        'subsample': [ 0.6, 0.8, 1.0],
        'max_depth': [1,2,3,4],
        'learning_rate': [0.1,0.2, 0.3, 0.4, 0.5],
        "min_samples_split": [1,2,4,6]}
In [172]:
xgb = XGBClassifier()
xgb = GridSearchCV(xgb,xgb_params,verbose=0,n_jobs=-1,cv=3)
xgb = xgb.fit(x_train,y_train)
xgb.best_params_
[20:48:38] WARNING: C:\Users\Administrator\workspace\xgboost-win64_release_1.2.0\src\learner.cc:516: 
Parameters: { min_samples_split } might not be used.

  This may not be accurate due to some parameters are only used in language bindings but
  passed down to XGBoost core.  Or some parameters are not used but slip through this
  verification. Please open an issue if you find above cases.


Out[172]:
{'learning_rate': 0.5,
 'max_depth': 2,
 'min_samples_split': 1,
 'n_estimators': 50,
 'subsample': 0.8}
In [173]:
xgbm_cv = XGBClassifier(learning_rate=0.3,
                       max_depth=2,
                       min_samples_split=1,
                       n_estimators=100,
                       subsample=1.0,random_state=42).fit(x_train,y_train)
[20:49:58] WARNING: C:\Users\Administrator\workspace\xgboost-win64_release_1.2.0\src\learner.cc:516: 
Parameters: { min_samples_split } might not be used.

  This may not be accurate due to some parameters are only used in language bindings but
  passed down to XGBoost core.  Or some parameters are not used but slip through this
  verification. Please open an issue if you find above cases.


In [174]:
y_pred = xgbm_cv.predict(x_test)
xgbm_score = (accuracy_score(y_test,y_pred)*100)
xgbm_score
Out[174]:
86.5
Confusion Matrix
In [175]:
xgbm_cm = confusion_matrix(y_test,y_pred)
xgbm_cm
Out[175]:
array([[2333,   83],
       [ 322,  262]], dtype=int64)
Light GBM
In [176]:
from lightgbm import LGBMClassifier
lgbm = LGBMClassifier().fit(x_train,y_train)
y_pred = lgbm.predict(x_test)
In [177]:
lgbm_score = (accuracy_score(y_test,y_pred)*100)
lgbm_score
Out[177]:
86.76666666666667
Tuning
In [178]:
lgbm_params = {"learning_rate" : [0.001,0.01, 0.1],
             "n_estimators": [100,200,300,500,1000],
             "max_depth": [2,3,5,7],
             "min_child_samples": [1,3,5,7]}
lgbm = LGBMClassifier()
lgbm_cv = GridSearchCV(lgbm,lgbm_params,verbose=0,n_jobs=-1,cv=5)
lgbm_cv_model = lgbm_cv.fit(x_train,y_train)
lgbm_cv_model.best_params_
Out[178]:
{'learning_rate': 0.01,
 'max_depth': 3,
 'min_child_samples': 3,
 'n_estimators': 1000}
In [179]:
lgbm = LGBMClassifier(learning_rate=0.01,
                      max_depth=5,
                      min_child_samples=5,
                      n_estimators=400)
lgbm_tuned = lgbm.fit(x_train,y_train)
y_pred = lgbm_tuned.predict(x_test)
lgbm_tuned_acc = (accuracy_score(y_test,y_pred)*100)
lgbm_tuned_acc
Out[179]:
87.1
In [180]:
lgbm_cm = confusion_matrix(y_test,y_pred)
lgbm_cm
Out[180]:
array([[2351,   65],
       [ 322,  262]], dtype=int64)
Cat-Boost
In [181]:
from catboost import CatBoostClassifier
cat_model = CatBoostClassifier().fit(x_train,y_train)
y_pred = cat_model.predict(x_test)
Learning rate set to 0.023648
0:	learn: 0.6744859	total: 147ms	remaining: 2m 26s
1:	learn: 0.6569687	total: 154ms	remaining: 1m 17s
2:	learn: 0.6445656	total: 159ms	remaining: 52.8s
3:	learn: 0.6278879	total: 167ms	remaining: 41.6s
4:	learn: 0.6163601	total: 174ms	remaining: 34.6s
5:	learn: 0.6018561	total: 182ms	remaining: 30.1s
6:	learn: 0.5909632	total: 188ms	remaining: 26.7s
7:	learn: 0.5804950	total: 198ms	remaining: 24.5s
8:	learn: 0.5704407	total: 202ms	remaining: 22.3s
9:	learn: 0.5589911	total: 209ms	remaining: 20.7s
10:	learn: 0.5476883	total: 214ms	remaining: 19.3s
11:	learn: 0.5377425	total: 220ms	remaining: 18.1s
12:	learn: 0.5288395	total: 224ms	remaining: 17s
13:	learn: 0.5214097	total: 229ms	remaining: 16.2s
14:	learn: 0.5123212	total: 234ms	remaining: 15.4s
15:	learn: 0.5043345	total: 239ms	remaining: 14.7s
16:	learn: 0.4972805	total: 243ms	remaining: 14s
17:	learn: 0.4894524	total: 247ms	remaining: 13.5s
18:	learn: 0.4830029	total: 253ms	remaining: 13.1s
19:	learn: 0.4781403	total: 259ms	remaining: 12.7s
20:	learn: 0.4730948	total: 264ms	remaining: 12.3s
21:	learn: 0.4684628	total: 270ms	remaining: 12s
22:	learn: 0.4627825	total: 276ms	remaining: 11.7s
23:	learn: 0.4584854	total: 283ms	remaining: 11.5s
24:	learn: 0.4532444	total: 289ms	remaining: 11.3s
25:	learn: 0.4482704	total: 294ms	remaining: 11s
26:	learn: 0.4451309	total: 300ms	remaining: 10.8s
27:	learn: 0.4406935	total: 306ms	remaining: 10.6s
28:	learn: 0.4359552	total: 313ms	remaining: 10.5s
29:	learn: 0.4333320	total: 321ms	remaining: 10.4s
30:	learn: 0.4302786	total: 327ms	remaining: 10.2s
31:	learn: 0.4273473	total: 332ms	remaining: 10s
32:	learn: 0.4236856	total: 336ms	remaining: 9.86s
33:	learn: 0.4200651	total: 341ms	remaining: 9.69s
34:	learn: 0.4166416	total: 351ms	remaining: 9.67s
35:	learn: 0.4135560	total: 358ms	remaining: 9.58s
36:	learn: 0.4113626	total: 363ms	remaining: 9.45s
37:	learn: 0.4092765	total: 369ms	remaining: 9.34s
38:	learn: 0.4069327	total: 374ms	remaining: 9.21s
39:	learn: 0.4046918	total: 380ms	remaining: 9.13s
40:	learn: 0.4021637	total: 387ms	remaining: 9.05s
41:	learn: 0.4005031	total: 393ms	remaining: 8.96s
42:	learn: 0.3981975	total: 397ms	remaining: 8.84s
43:	learn: 0.3962108	total: 402ms	remaining: 8.73s
44:	learn: 0.3938857	total: 407ms	remaining: 8.64s
45:	learn: 0.3923532	total: 413ms	remaining: 8.55s
46:	learn: 0.3906458	total: 418ms	remaining: 8.48s
47:	learn: 0.3891039	total: 423ms	remaining: 8.39s
48:	learn: 0.3877241	total: 428ms	remaining: 8.31s
49:	learn: 0.3860205	total: 436ms	remaining: 8.28s
50:	learn: 0.3842052	total: 442ms	remaining: 8.22s
51:	learn: 0.3826958	total: 447ms	remaining: 8.15s
52:	learn: 0.3816404	total: 453ms	remaining: 8.09s
53:	learn: 0.3802179	total: 457ms	remaining: 8.01s
54:	learn: 0.3792572	total: 461ms	remaining: 7.92s
55:	learn: 0.3778718	total: 466ms	remaining: 7.85s
56:	learn: 0.3763903	total: 470ms	remaining: 7.77s
57:	learn: 0.3751104	total: 474ms	remaining: 7.7s
58:	learn: 0.3741002	total: 478ms	remaining: 7.63s
59:	learn: 0.3731029	total: 483ms	remaining: 7.56s
60:	learn: 0.3722945	total: 487ms	remaining: 7.49s
61:	learn: 0.3710267	total: 490ms	remaining: 7.42s
62:	learn: 0.3699182	total: 494ms	remaining: 7.35s
63:	learn: 0.3693419	total: 499ms	remaining: 7.29s
64:	learn: 0.3684505	total: 504ms	remaining: 7.25s
65:	learn: 0.3676202	total: 509ms	remaining: 7.2s
66:	learn: 0.3667576	total: 515ms	remaining: 7.16s
67:	learn: 0.3657782	total: 518ms	remaining: 7.1s
68:	learn: 0.3649949	total: 522ms	remaining: 7.05s
69:	learn: 0.3640201	total: 526ms	remaining: 6.99s
70:	learn: 0.3631727	total: 530ms	remaining: 6.93s
71:	learn: 0.3627047	total: 533ms	remaining: 6.88s
72:	learn: 0.3616896	total: 538ms	remaining: 6.83s
73:	learn: 0.3610382	total: 541ms	remaining: 6.77s
74:	learn: 0.3604776	total: 546ms	remaining: 6.73s
75:	learn: 0.3598925	total: 550ms	remaining: 6.68s
76:	learn: 0.3590545	total: 553ms	remaining: 6.63s
77:	learn: 0.3584210	total: 557ms	remaining: 6.59s
78:	learn: 0.3579467	total: 561ms	remaining: 6.54s
79:	learn: 0.3573897	total: 565ms	remaining: 6.5s
80:	learn: 0.3569555	total: 569ms	remaining: 6.45s
81:	learn: 0.3562543	total: 573ms	remaining: 6.41s
82:	learn: 0.3556121	total: 577ms	remaining: 6.37s
83:	learn: 0.3550021	total: 580ms	remaining: 6.33s
84:	learn: 0.3544793	total: 584ms	remaining: 6.29s
85:	learn: 0.3538011	total: 588ms	remaining: 6.25s
86:	learn: 0.3532885	total: 592ms	remaining: 6.21s
87:	learn: 0.3529667	total: 596ms	remaining: 6.18s
88:	learn: 0.3523867	total: 601ms	remaining: 6.15s
89:	learn: 0.3516241	total: 605ms	remaining: 6.12s
90:	learn: 0.3512105	total: 609ms	remaining: 6.08s
91:	learn: 0.3506982	total: 614ms	remaining: 6.06s
92:	learn: 0.3503755	total: 622ms	remaining: 6.06s
93:	learn: 0.3501456	total: 632ms	remaining: 6.09s
94:	learn: 0.3496035	total: 640ms	remaining: 6.1s
95:	learn: 0.3490134	total: 649ms	remaining: 6.11s
96:	learn: 0.3484954	total: 657ms	remaining: 6.12s
97:	learn: 0.3480194	total: 666ms	remaining: 6.13s
98:	learn: 0.3477418	total: 675ms	remaining: 6.14s
99:	learn: 0.3474400	total: 684ms	remaining: 6.16s
100:	learn: 0.3469502	total: 694ms	remaining: 6.18s
101:	learn: 0.3463312	total: 703ms	remaining: 6.19s
102:	learn: 0.3461502	total: 712ms	remaining: 6.2s
103:	learn: 0.3455904	total: 721ms	remaining: 6.21s
104:	learn: 0.3452590	total: 730ms	remaining: 6.22s
105:	learn: 0.3448050	total: 739ms	remaining: 6.23s
106:	learn: 0.3443093	total: 749ms	remaining: 6.25s
107:	learn: 0.3439899	total: 759ms	remaining: 6.27s
108:	learn: 0.3437449	total: 768ms	remaining: 6.28s
109:	learn: 0.3433915	total: 777ms	remaining: 6.28s
110:	learn: 0.3430451	total: 783ms	remaining: 6.27s
111:	learn: 0.3427210	total: 791ms	remaining: 6.27s
112:	learn: 0.3425132	total: 796ms	remaining: 6.24s
113:	learn: 0.3423327	total: 801ms	remaining: 6.22s
114:	learn: 0.3420804	total: 807ms	remaining: 6.21s
115:	learn: 0.3415759	total: 812ms	remaining: 6.19s
116:	learn: 0.3412977	total: 819ms	remaining: 6.18s
117:	learn: 0.3409605	total: 825ms	remaining: 6.17s
118:	learn: 0.3406005	total: 831ms	remaining: 6.15s
119:	learn: 0.3403574	total: 836ms	remaining: 6.13s
120:	learn: 0.3401165	total: 841ms	remaining: 6.11s
121:	learn: 0.3397840	total: 846ms	remaining: 6.09s
122:	learn: 0.3395756	total: 850ms	remaining: 6.06s
123:	learn: 0.3393102	total: 856ms	remaining: 6.05s
124:	learn: 0.3390735	total: 862ms	remaining: 6.03s
125:	learn: 0.3386482	total: 866ms	remaining: 6.01s
126:	learn: 0.3384080	total: 871ms	remaining: 5.98s
127:	learn: 0.3382431	total: 875ms	remaining: 5.96s
128:	learn: 0.3380216	total: 880ms	remaining: 5.94s
129:	learn: 0.3378238	total: 884ms	remaining: 5.92s
130:	learn: 0.3375944	total: 889ms	remaining: 5.9s
131:	learn: 0.3373109	total: 894ms	remaining: 5.88s
132:	learn: 0.3369639	total: 902ms	remaining: 5.88s
133:	learn: 0.3367765	total: 911ms	remaining: 5.89s
134:	learn: 0.3366103	total: 917ms	remaining: 5.88s
135:	learn: 0.3365027	total: 924ms	remaining: 5.87s
136:	learn: 0.3363729	total: 931ms	remaining: 5.87s
137:	learn: 0.3362640	total: 939ms	remaining: 5.87s
138:	learn: 0.3359216	total: 947ms	remaining: 5.87s
139:	learn: 0.3357639	total: 954ms	remaining: 5.86s
140:	learn: 0.3356238	total: 962ms	remaining: 5.86s
141:	learn: 0.3354169	total: 969ms	remaining: 5.86s
142:	learn: 0.3352889	total: 977ms	remaining: 5.85s
143:	learn: 0.3351063	total: 983ms	remaining: 5.84s
144:	learn: 0.3348875	total: 991ms	remaining: 5.84s
145:	learn: 0.3346445	total: 996ms	remaining: 5.83s
146:	learn: 0.3343828	total: 1s	remaining: 5.83s
147:	learn: 0.3342499	total: 1.01s	remaining: 5.81s
148:	learn: 0.3341789	total: 1.01s	remaining: 5.8s
149:	learn: 0.3337740	total: 1.02s	remaining: 5.79s
150:	learn: 0.3335048	total: 1.03s	remaining: 5.78s
151:	learn: 0.3332830	total: 1.03s	remaining: 5.77s
152:	learn: 0.3332030	total: 1.04s	remaining: 5.75s
153:	learn: 0.3330382	total: 1.05s	remaining: 5.75s
154:	learn: 0.3329616	total: 1.05s	remaining: 5.73s
155:	learn: 0.3327458	total: 1.06s	remaining: 5.72s
156:	learn: 0.3326928	total: 1.06s	remaining: 5.7s
157:	learn: 0.3325421	total: 1.07s	remaining: 5.69s
158:	learn: 0.3322803	total: 1.07s	remaining: 5.67s
159:	learn: 0.3321350	total: 1.08s	remaining: 5.65s
160:	learn: 0.3319796	total: 1.08s	remaining: 5.63s
161:	learn: 0.3317557	total: 1.08s	remaining: 5.61s
162:	learn: 0.3317432	total: 1.09s	remaining: 5.59s
163:	learn: 0.3316335	total: 1.09s	remaining: 5.57s
164:	learn: 0.3314094	total: 1.1s	remaining: 5.55s
165:	learn: 0.3312881	total: 1.1s	remaining: 5.53s
166:	learn: 0.3311213	total: 1.1s	remaining: 5.51s
167:	learn: 0.3309906	total: 1.11s	remaining: 5.5s
168:	learn: 0.3307175	total: 1.11s	remaining: 5.48s
169:	learn: 0.3305800	total: 1.12s	remaining: 5.46s
170:	learn: 0.3304516	total: 1.12s	remaining: 5.45s
171:	learn: 0.3302950	total: 1.13s	remaining: 5.42s
172:	learn: 0.3300678	total: 1.13s	remaining: 5.41s
173:	learn: 0.3299126	total: 1.13s	remaining: 5.39s
174:	learn: 0.3296916	total: 1.14s	remaining: 5.38s
175:	learn: 0.3295077	total: 1.15s	remaining: 5.38s
176:	learn: 0.3293379	total: 1.16s	remaining: 5.39s
177:	learn: 0.3290767	total: 1.17s	remaining: 5.4s
178:	learn: 0.3287761	total: 1.18s	remaining: 5.4s
179:	learn: 0.3286013	total: 1.19s	remaining: 5.4s
180:	learn: 0.3284248	total: 1.2s	remaining: 5.41s
181:	learn: 0.3281936	total: 1.21s	remaining: 5.42s
182:	learn: 0.3280014	total: 1.21s	remaining: 5.42s
183:	learn: 0.3278851	total: 1.22s	remaining: 5.42s
184:	learn: 0.3276445	total: 1.23s	remaining: 5.43s
185:	learn: 0.3274685	total: 1.24s	remaining: 5.43s
186:	learn: 0.3273602	total: 1.25s	remaining: 5.44s
187:	learn: 0.3272852	total: 1.26s	remaining: 5.44s
188:	learn: 0.3272182	total: 1.27s	remaining: 5.45s
189:	learn: 0.3271611	total: 1.28s	remaining: 5.45s
190:	learn: 0.3269385	total: 1.29s	remaining: 5.46s
191:	learn: 0.3267472	total: 1.3s	remaining: 5.46s
192:	learn: 0.3265947	total: 1.31s	remaining: 5.47s
193:	learn: 0.3263643	total: 1.32s	remaining: 5.48s
194:	learn: 0.3262289	total: 1.33s	remaining: 5.49s
195:	learn: 0.3260353	total: 1.34s	remaining: 5.5s
196:	learn: 0.3259277	total: 1.35s	remaining: 5.5s
197:	learn: 0.3258461	total: 1.35s	remaining: 5.49s
198:	learn: 0.3257645	total: 1.36s	remaining: 5.48s
199:	learn: 0.3256514	total: 1.37s	remaining: 5.48s
200:	learn: 0.3254201	total: 1.38s	remaining: 5.47s
201:	learn: 0.3253548	total: 1.38s	remaining: 5.46s
202:	learn: 0.3252295	total: 1.39s	remaining: 5.44s
203:	learn: 0.3251380	total: 1.39s	remaining: 5.42s
204:	learn: 0.3250722	total: 1.4s	remaining: 5.41s
205:	learn: 0.3248710	total: 1.4s	remaining: 5.39s
206:	learn: 0.3247044	total: 1.4s	remaining: 5.38s
207:	learn: 0.3245486	total: 1.41s	remaining: 5.36s
208:	learn: 0.3243934	total: 1.41s	remaining: 5.34s
209:	learn: 0.3242982	total: 1.42s	remaining: 5.33s
210:	learn: 0.3242501	total: 1.42s	remaining: 5.31s
211:	learn: 0.3241867	total: 1.42s	remaining: 5.29s
212:	learn: 0.3240597	total: 1.43s	remaining: 5.28s
213:	learn: 0.3238929	total: 1.43s	remaining: 5.26s
214:	learn: 0.3237281	total: 1.44s	remaining: 5.24s
215:	learn: 0.3235477	total: 1.44s	remaining: 5.23s
216:	learn: 0.3234379	total: 1.44s	remaining: 5.21s
217:	learn: 0.3232731	total: 1.45s	remaining: 5.21s
218:	learn: 0.3231697	total: 1.46s	remaining: 5.21s
219:	learn: 0.3229817	total: 1.47s	remaining: 5.2s
220:	learn: 0.3228465	total: 1.48s	remaining: 5.2s
221:	learn: 0.3226945	total: 1.48s	remaining: 5.2s
222:	learn: 0.3226344	total: 1.49s	remaining: 5.2s
223:	learn: 0.3225344	total: 1.5s	remaining: 5.2s
224:	learn: 0.3223817	total: 1.51s	remaining: 5.2s
225:	learn: 0.3221871	total: 1.52s	remaining: 5.2s
226:	learn: 0.3220183	total: 1.53s	remaining: 5.2s
227:	learn: 0.3219040	total: 1.54s	remaining: 5.21s
228:	learn: 0.3218258	total: 1.55s	remaining: 5.21s
229:	learn: 0.3217048	total: 1.56s	remaining: 5.21s
230:	learn: 0.3215334	total: 1.56s	remaining: 5.19s
231:	learn: 0.3214074	total: 1.56s	remaining: 5.18s
232:	learn: 0.3213786	total: 1.57s	remaining: 5.16s
233:	learn: 0.3213508	total: 1.57s	remaining: 5.14s
234:	learn: 0.3212412	total: 1.57s	remaining: 5.12s
235:	learn: 0.3211482	total: 1.57s	remaining: 5.1s
236:	learn: 0.3210041	total: 1.58s	remaining: 5.08s
237:	learn: 0.3208685	total: 1.58s	remaining: 5.07s
238:	learn: 0.3207901	total: 1.59s	remaining: 5.05s
239:	learn: 0.3206055	total: 1.59s	remaining: 5.04s
240:	learn: 0.3204737	total: 1.6s	remaining: 5.03s
241:	learn: 0.3203463	total: 1.61s	remaining: 5.03s
242:	learn: 0.3202167	total: 1.61s	remaining: 5.03s
243:	learn: 0.3200267	total: 1.62s	remaining: 5.03s
244:	learn: 0.3199552	total: 1.63s	remaining: 5.03s
245:	learn: 0.3199350	total: 1.64s	remaining: 5.03s
246:	learn: 0.3197522	total: 1.65s	remaining: 5.03s
247:	learn: 0.3196337	total: 1.66s	remaining: 5.03s
248:	learn: 0.3195248	total: 1.67s	remaining: 5.03s
249:	learn: 0.3194333	total: 1.68s	remaining: 5.03s
250:	learn: 0.3192388	total: 1.69s	remaining: 5.03s
251:	learn: 0.3191184	total: 1.7s	remaining: 5.03s
252:	learn: 0.3190504	total: 1.7s	remaining: 5.03s
253:	learn: 0.3189172	total: 1.71s	remaining: 5.03s
254:	learn: 0.3187697	total: 1.72s	remaining: 5.03s
255:	learn: 0.3186597	total: 1.73s	remaining: 5.04s
256:	learn: 0.3185960	total: 1.74s	remaining: 5.04s
257:	learn: 0.3184573	total: 1.75s	remaining: 5.04s
258:	learn: 0.3183867	total: 1.76s	remaining: 5.04s
259:	learn: 0.3182746	total: 1.77s	remaining: 5.04s
260:	learn: 0.3181881	total: 1.78s	remaining: 5.04s
261:	learn: 0.3180357	total: 1.79s	remaining: 5.04s
262:	learn: 0.3180126	total: 1.8s	remaining: 5.03s
263:	learn: 0.3178764	total: 1.81s	remaining: 5.04s
264:	learn: 0.3178065	total: 1.81s	remaining: 5.03s
265:	learn: 0.3176209	total: 1.82s	remaining: 5.03s
266:	learn: 0.3175437	total: 1.83s	remaining: 5.02s
267:	learn: 0.3174644	total: 1.84s	remaining: 5.02s
268:	learn: 0.3173441	total: 1.85s	remaining: 5.02s
269:	learn: 0.3172285	total: 1.85s	remaining: 5.01s
270:	learn: 0.3171149	total: 1.86s	remaining: 5.01s
271:	learn: 0.3169964	total: 1.87s	remaining: 5.01s
272:	learn: 0.3168314	total: 1.88s	remaining: 5.01s
273:	learn: 0.3167623	total: 1.89s	remaining: 5.01s
274:	learn: 0.3167211	total: 1.9s	remaining: 5.01s
275:	learn: 0.3165472	total: 1.91s	remaining: 5.01s
276:	learn: 0.3164727	total: 1.92s	remaining: 5s
277:	learn: 0.3163334	total: 1.93s	remaining: 5s
278:	learn: 0.3162260	total: 1.94s	remaining: 5s
279:	learn: 0.3160907	total: 1.95s	remaining: 5s
280:	learn: 0.3159105	total: 1.95s	remaining: 5s
281:	learn: 0.3158039	total: 1.96s	remaining: 5s
282:	learn: 0.3156452	total: 1.97s	remaining: 5s
283:	learn: 0.3155563	total: 1.98s	remaining: 5s
284:	learn: 0.3154573	total: 1.99s	remaining: 5s
285:	learn: 0.3152680	total: 2s	remaining: 4.99s
286:	learn: 0.3151619	total: 2.01s	remaining: 4.99s
287:	learn: 0.3150610	total: 2.02s	remaining: 4.99s
288:	learn: 0.3149696	total: 2.03s	remaining: 4.99s
289:	learn: 0.3149000	total: 2.04s	remaining: 4.99s
290:	learn: 0.3147247	total: 2.04s	remaining: 4.98s
291:	learn: 0.3146067	total: 2.05s	remaining: 4.98s
292:	learn: 0.3145794	total: 2.06s	remaining: 4.98s
293:	learn: 0.3144457	total: 2.07s	remaining: 4.97s
294:	learn: 0.3143730	total: 2.08s	remaining: 4.97s
295:	learn: 0.3142776	total: 2.09s	remaining: 4.97s
296:	learn: 0.3140768	total: 2.1s	remaining: 4.97s
297:	learn: 0.3139623	total: 2.11s	remaining: 4.97s
298:	learn: 0.3138688	total: 2.12s	remaining: 4.96s
299:	learn: 0.3138096	total: 2.13s	remaining: 4.96s
300:	learn: 0.3137227	total: 2.13s	remaining: 4.96s
301:	learn: 0.3135877	total: 2.14s	remaining: 4.96s
302:	learn: 0.3134901	total: 2.15s	remaining: 4.95s
303:	learn: 0.3133616	total: 2.16s	remaining: 4.95s
304:	learn: 0.3132542	total: 2.17s	remaining: 4.95s
305:	learn: 0.3131076	total: 2.18s	remaining: 4.95s
306:	learn: 0.3130275	total: 2.19s	remaining: 4.95s
307:	learn: 0.3129526	total: 2.2s	remaining: 4.94s
308:	learn: 0.3129096	total: 2.21s	remaining: 4.94s
309:	learn: 0.3128261	total: 2.22s	remaining: 4.94s
310:	learn: 0.3127001	total: 2.23s	remaining: 4.93s
311:	learn: 0.3126384	total: 2.23s	remaining: 4.93s
312:	learn: 0.3125186	total: 2.24s	remaining: 4.92s
313:	learn: 0.3124650	total: 2.25s	remaining: 4.92s
314:	learn: 0.3124137	total: 2.26s	remaining: 4.91s
315:	learn: 0.3123276	total: 2.27s	remaining: 4.91s
316:	learn: 0.3122486	total: 2.28s	remaining: 4.91s
317:	learn: 0.3120808	total: 2.29s	remaining: 4.91s
318:	learn: 0.3120033	total: 2.29s	remaining: 4.9s
319:	learn: 0.3118555	total: 2.3s	remaining: 4.89s
320:	learn: 0.3117153	total: 2.31s	remaining: 4.89s
321:	learn: 0.3116585	total: 2.32s	remaining: 4.88s
322:	learn: 0.3114834	total: 2.32s	remaining: 4.87s
323:	learn: 0.3114128	total: 2.33s	remaining: 4.86s
324:	learn: 0.3113512	total: 2.33s	remaining: 4.84s
325:	learn: 0.3111778	total: 2.34s	remaining: 4.83s
326:	learn: 0.3111065	total: 2.34s	remaining: 4.82s
327:	learn: 0.3110373	total: 2.35s	remaining: 4.81s
328:	learn: 0.3110003	total: 2.35s	remaining: 4.8s
329:	learn: 0.3109417	total: 2.36s	remaining: 4.79s
330:	learn: 0.3107461	total: 2.36s	remaining: 4.77s
331:	learn: 0.3105828	total: 2.37s	remaining: 4.76s
332:	learn: 0.3105011	total: 2.37s	remaining: 4.75s
333:	learn: 0.3103471	total: 2.38s	remaining: 4.74s
334:	learn: 0.3102526	total: 2.38s	remaining: 4.73s
335:	learn: 0.3101522	total: 2.39s	remaining: 4.72s
336:	learn: 0.3100109	total: 2.39s	remaining: 4.7s
337:	learn: 0.3099275	total: 2.4s	remaining: 4.69s
338:	learn: 0.3098263	total: 2.4s	remaining: 4.68s
339:	learn: 0.3097439	total: 2.41s	remaining: 4.67s
340:	learn: 0.3096326	total: 2.41s	remaining: 4.66s
341:	learn: 0.3095469	total: 2.42s	remaining: 4.65s
342:	learn: 0.3094050	total: 2.42s	remaining: 4.64s
343:	learn: 0.3093669	total: 2.42s	remaining: 4.62s
344:	learn: 0.3092760	total: 2.43s	remaining: 4.61s
345:	learn: 0.3092479	total: 2.43s	remaining: 4.59s
346:	learn: 0.3091811	total: 2.43s	remaining: 4.58s
347:	learn: 0.3091256	total: 2.44s	remaining: 4.57s
348:	learn: 0.3090421	total: 2.44s	remaining: 4.55s
349:	learn: 0.3089462	total: 2.44s	remaining: 4.54s
350:	learn: 0.3088729	total: 2.45s	remaining: 4.53s
351:	learn: 0.3087450	total: 2.45s	remaining: 4.51s
352:	learn: 0.3085864	total: 2.45s	remaining: 4.5s
353:	learn: 0.3084921	total: 2.46s	remaining: 4.49s
354:	learn: 0.3084590	total: 2.46s	remaining: 4.47s
355:	learn: 0.3084040	total: 2.46s	remaining: 4.46s
356:	learn: 0.3082500	total: 2.47s	remaining: 4.45s
357:	learn: 0.3082121	total: 2.47s	remaining: 4.43s
358:	learn: 0.3080720	total: 2.48s	remaining: 4.42s
359:	learn: 0.3079628	total: 2.48s	remaining: 4.41s
360:	learn: 0.3078232	total: 2.48s	remaining: 4.4s
361:	learn: 0.3076790	total: 2.49s	remaining: 4.38s
362:	learn: 0.3075300	total: 2.49s	remaining: 4.37s
363:	learn: 0.3073583	total: 2.5s	remaining: 4.36s
364:	learn: 0.3072551	total: 2.5s	remaining: 4.35s
365:	learn: 0.3071858	total: 2.5s	remaining: 4.34s
366:	learn: 0.3071309	total: 2.51s	remaining: 4.32s
367:	learn: 0.3070383	total: 2.51s	remaining: 4.31s
368:	learn: 0.3069253	total: 2.52s	remaining: 4.3s
369:	learn: 0.3067748	total: 2.52s	remaining: 4.29s
370:	learn: 0.3066094	total: 2.52s	remaining: 4.28s
371:	learn: 0.3065190	total: 2.53s	remaining: 4.27s
372:	learn: 0.3063980	total: 2.53s	remaining: 4.25s
373:	learn: 0.3062632	total: 2.54s	remaining: 4.24s
374:	learn: 0.3062007	total: 2.54s	remaining: 4.23s
375:	learn: 0.3061390	total: 2.54s	remaining: 4.22s
376:	learn: 0.3059594	total: 2.55s	remaining: 4.21s
377:	learn: 0.3058251	total: 2.55s	remaining: 4.2s
378:	learn: 0.3056869	total: 2.55s	remaining: 4.19s
379:	learn: 0.3055310	total: 2.56s	remaining: 4.17s
380:	learn: 0.3054141	total: 2.56s	remaining: 4.16s
381:	learn: 0.3052768	total: 2.57s	remaining: 4.15s
382:	learn: 0.3051520	total: 2.57s	remaining: 4.14s
383:	learn: 0.3050335	total: 2.58s	remaining: 4.13s
384:	learn: 0.3049538	total: 2.58s	remaining: 4.12s
385:	learn: 0.3048397	total: 2.58s	remaining: 4.11s
386:	learn: 0.3046875	total: 2.59s	remaining: 4.1s
387:	learn: 0.3046222	total: 2.6s	remaining: 4.09s
388:	learn: 0.3044529	total: 2.6s	remaining: 4.09s
389:	learn: 0.3043908	total: 2.61s	remaining: 4.09s
390:	learn: 0.3043139	total: 2.62s	remaining: 4.08s
391:	learn: 0.3042196	total: 2.63s	remaining: 4.08s
392:	learn: 0.3040177	total: 2.64s	remaining: 4.08s
393:	learn: 0.3038647	total: 2.65s	remaining: 4.07s
394:	learn: 0.3037975	total: 2.66s	remaining: 4.07s
395:	learn: 0.3037701	total: 2.67s	remaining: 4.07s
396:	learn: 0.3036817	total: 2.68s	remaining: 4.07s
397:	learn: 0.3036264	total: 2.69s	remaining: 4.06s
398:	learn: 0.3035698	total: 2.69s	remaining: 4.06s
399:	learn: 0.3034860	total: 2.7s	remaining: 4.06s
400:	learn: 0.3033896	total: 2.71s	remaining: 4.05s
401:	learn: 0.3032889	total: 2.72s	remaining: 4.05s
402:	learn: 0.3031718	total: 2.73s	remaining: 4.05s
403:	learn: 0.3030427	total: 2.74s	remaining: 4.04s
404:	learn: 0.3029463	total: 2.75s	remaining: 4.04s
405:	learn: 0.3028350	total: 2.76s	remaining: 4.04s
406:	learn: 0.3026423	total: 2.77s	remaining: 4.03s
407:	learn: 0.3025326	total: 2.78s	remaining: 4.03s
408:	learn: 0.3024145	total: 2.79s	remaining: 4.03s
409:	learn: 0.3023258	total: 2.8s	remaining: 4.03s
410:	learn: 0.3022104	total: 2.81s	remaining: 4.02s
411:	learn: 0.3021345	total: 2.81s	remaining: 4.02s
412:	learn: 0.3020033	total: 2.82s	remaining: 4.01s
413:	learn: 0.3019176	total: 2.83s	remaining: 4.01s
414:	learn: 0.3018215	total: 2.84s	remaining: 4.01s
415:	learn: 0.3017320	total: 2.85s	remaining: 4s
416:	learn: 0.3016541	total: 2.86s	remaining: 4s
417:	learn: 0.3015615	total: 2.87s	remaining: 4s
418:	learn: 0.3014437	total: 2.88s	remaining: 4s
419:	learn: 0.3013219	total: 2.89s	remaining: 3.99s
420:	learn: 0.3012285	total: 2.9s	remaining: 3.99s
421:	learn: 0.3011071	total: 2.91s	remaining: 3.98s
422:	learn: 0.3010415	total: 2.92s	remaining: 3.98s
423:	learn: 0.3009376	total: 2.92s	remaining: 3.97s
424:	learn: 0.3008289	total: 2.93s	remaining: 3.97s
425:	learn: 0.3007021	total: 2.94s	remaining: 3.96s
426:	learn: 0.3006938	total: 2.95s	remaining: 3.96s
427:	learn: 0.3006004	total: 2.96s	remaining: 3.95s
428:	learn: 0.3004513	total: 2.97s	remaining: 3.95s
429:	learn: 0.3003496	total: 2.98s	remaining: 3.95s
430:	learn: 0.3002434	total: 2.99s	remaining: 3.94s
431:	learn: 0.3001007	total: 3s	remaining: 3.94s
432:	learn: 0.3000084	total: 3s	remaining: 3.94s
433:	learn: 0.2998893	total: 3.02s	remaining: 3.93s
434:	learn: 0.2997875	total: 3.02s	remaining: 3.93s
435:	learn: 0.2996781	total: 3.03s	remaining: 3.92s
436:	learn: 0.2995975	total: 3.04s	remaining: 3.92s
437:	learn: 0.2994974	total: 3.05s	remaining: 3.92s
438:	learn: 0.2993404	total: 3.06s	remaining: 3.91s
439:	learn: 0.2992519	total: 3.06s	remaining: 3.9s
440:	learn: 0.2991481	total: 3.07s	remaining: 3.89s
441:	learn: 0.2990465	total: 3.08s	remaining: 3.88s
442:	learn: 0.2989874	total: 3.08s	remaining: 3.87s
443:	learn: 0.2989372	total: 3.08s	remaining: 3.86s
444:	learn: 0.2988662	total: 3.09s	remaining: 3.85s
445:	learn: 0.2987807	total: 3.09s	remaining: 3.84s
446:	learn: 0.2987130	total: 3.1s	remaining: 3.83s
447:	learn: 0.2986264	total: 3.1s	remaining: 3.82s
448:	learn: 0.2985494	total: 3.11s	remaining: 3.82s
449:	learn: 0.2984736	total: 3.12s	remaining: 3.81s
450:	learn: 0.2983464	total: 3.13s	remaining: 3.81s
451:	learn: 0.2982227	total: 3.14s	remaining: 3.81s
452:	learn: 0.2980751	total: 3.15s	remaining: 3.8s
453:	learn: 0.2979873	total: 3.16s	remaining: 3.8s
454:	learn: 0.2978423	total: 3.17s	remaining: 3.79s
455:	learn: 0.2977251	total: 3.18s	remaining: 3.79s
456:	learn: 0.2975998	total: 3.19s	remaining: 3.79s
457:	learn: 0.2974569	total: 3.2s	remaining: 3.78s
458:	learn: 0.2973750	total: 3.21s	remaining: 3.78s
459:	learn: 0.2972271	total: 3.22s	remaining: 3.78s
460:	learn: 0.2971703	total: 3.23s	remaining: 3.77s
461:	learn: 0.2970683	total: 3.23s	remaining: 3.77s
462:	learn: 0.2969953	total: 3.24s	remaining: 3.76s
463:	learn: 0.2968423	total: 3.25s	remaining: 3.76s
464:	learn: 0.2967673	total: 3.26s	remaining: 3.75s
465:	learn: 0.2966840	total: 3.27s	remaining: 3.75s
466:	learn: 0.2965610	total: 3.28s	remaining: 3.74s
467:	learn: 0.2964787	total: 3.29s	remaining: 3.74s
468:	learn: 0.2963708	total: 3.3s	remaining: 3.73s
469:	learn: 0.2962534	total: 3.3s	remaining: 3.73s
470:	learn: 0.2962048	total: 3.32s	remaining: 3.73s
471:	learn: 0.2961363	total: 3.33s	remaining: 3.73s
472:	learn: 0.2960244	total: 3.34s	remaining: 3.72s
473:	learn: 0.2959151	total: 3.34s	remaining: 3.71s
474:	learn: 0.2957871	total: 3.35s	remaining: 3.7s
475:	learn: 0.2956611	total: 3.35s	remaining: 3.69s
476:	learn: 0.2956091	total: 3.36s	remaining: 3.68s
477:	learn: 0.2955296	total: 3.37s	remaining: 3.67s
478:	learn: 0.2954390	total: 3.37s	remaining: 3.67s
479:	learn: 0.2953425	total: 3.38s	remaining: 3.66s
480:	learn: 0.2952103	total: 3.38s	remaining: 3.65s
481:	learn: 0.2951111	total: 3.38s	remaining: 3.64s
482:	learn: 0.2950186	total: 3.39s	remaining: 3.63s
483:	learn: 0.2949690	total: 3.39s	remaining: 3.62s
484:	learn: 0.2949057	total: 3.4s	remaining: 3.61s
485:	learn: 0.2947853	total: 3.4s	remaining: 3.6s
486:	learn: 0.2947011	total: 3.4s	remaining: 3.59s
487:	learn: 0.2945840	total: 3.41s	remaining: 3.58s
488:	learn: 0.2944623	total: 3.42s	remaining: 3.57s
489:	learn: 0.2943361	total: 3.42s	remaining: 3.56s
490:	learn: 0.2942289	total: 3.42s	remaining: 3.55s
491:	learn: 0.2940951	total: 3.43s	remaining: 3.54s
492:	learn: 0.2939964	total: 3.43s	remaining: 3.53s
493:	learn: 0.2939061	total: 3.44s	remaining: 3.52s
494:	learn: 0.2938412	total: 3.44s	remaining: 3.51s
495:	learn: 0.2937125	total: 3.45s	remaining: 3.5s
496:	learn: 0.2936311	total: 3.45s	remaining: 3.49s
497:	learn: 0.2935107	total: 3.46s	remaining: 3.48s
498:	learn: 0.2934645	total: 3.46s	remaining: 3.47s
499:	learn: 0.2934026	total: 3.47s	remaining: 3.47s
500:	learn: 0.2932897	total: 3.47s	remaining: 3.46s
501:	learn: 0.2931193	total: 3.48s	remaining: 3.45s
502:	learn: 0.2930248	total: 3.48s	remaining: 3.44s
503:	learn: 0.2929111	total: 3.49s	remaining: 3.43s
504:	learn: 0.2928346	total: 3.49s	remaining: 3.42s
505:	learn: 0.2927457	total: 3.5s	remaining: 3.41s
506:	learn: 0.2926199	total: 3.5s	remaining: 3.4s
507:	learn: 0.2924558	total: 3.5s	remaining: 3.4s
508:	learn: 0.2923327	total: 3.51s	remaining: 3.39s
509:	learn: 0.2922187	total: 3.51s	remaining: 3.38s
510:	learn: 0.2921262	total: 3.52s	remaining: 3.37s
511:	learn: 0.2920288	total: 3.52s	remaining: 3.36s
512:	learn: 0.2918969	total: 3.53s	remaining: 3.35s
513:	learn: 0.2917764	total: 3.53s	remaining: 3.34s
514:	learn: 0.2916619	total: 3.54s	remaining: 3.33s
515:	learn: 0.2915566	total: 3.54s	remaining: 3.32s
516:	learn: 0.2914227	total: 3.54s	remaining: 3.31s
517:	learn: 0.2912676	total: 3.55s	remaining: 3.3s
518:	learn: 0.2911634	total: 3.55s	remaining: 3.29s
519:	learn: 0.2910662	total: 3.56s	remaining: 3.28s
520:	learn: 0.2909592	total: 3.56s	remaining: 3.27s
521:	learn: 0.2908815	total: 3.56s	remaining: 3.26s
522:	learn: 0.2907531	total: 3.57s	remaining: 3.25s
523:	learn: 0.2906543	total: 3.58s	remaining: 3.25s
524:	learn: 0.2905950	total: 3.59s	remaining: 3.25s
525:	learn: 0.2905245	total: 3.6s	remaining: 3.24s
526:	learn: 0.2903954	total: 3.61s	remaining: 3.24s
527:	learn: 0.2902849	total: 3.62s	remaining: 3.23s
528:	learn: 0.2902138	total: 3.62s	remaining: 3.23s
529:	learn: 0.2901241	total: 3.63s	remaining: 3.22s
530:	learn: 0.2900428	total: 3.64s	remaining: 3.22s
531:	learn: 0.2899313	total: 3.65s	remaining: 3.21s
532:	learn: 0.2898218	total: 3.66s	remaining: 3.21s
533:	learn: 0.2897420	total: 3.67s	remaining: 3.21s
534:	learn: 0.2896562	total: 3.68s	remaining: 3.2s
535:	learn: 0.2895865	total: 3.69s	remaining: 3.2s
536:	learn: 0.2895407	total: 3.7s	remaining: 3.19s
537:	learn: 0.2894503	total: 3.71s	remaining: 3.19s
538:	learn: 0.2893767	total: 3.72s	remaining: 3.18s
539:	learn: 0.2892730	total: 3.72s	remaining: 3.17s
540:	learn: 0.2891772	total: 3.73s	remaining: 3.16s
541:	learn: 0.2890337	total: 3.73s	remaining: 3.15s
542:	learn: 0.2889305	total: 3.74s	remaining: 3.14s
543:	learn: 0.2887993	total: 3.74s	remaining: 3.13s
544:	learn: 0.2886943	total: 3.75s	remaining: 3.13s
545:	learn: 0.2886156	total: 3.75s	remaining: 3.12s
546:	learn: 0.2885417	total: 3.75s	remaining: 3.11s
547:	learn: 0.2884412	total: 3.76s	remaining: 3.1s
548:	learn: 0.2883201	total: 3.77s	remaining: 3.1s
549:	learn: 0.2882409	total: 3.78s	remaining: 3.09s
550:	learn: 0.2881434	total: 3.79s	remaining: 3.08s
551:	learn: 0.2880404	total: 3.79s	remaining: 3.08s
552:	learn: 0.2879466	total: 3.8s	remaining: 3.07s
553:	learn: 0.2877395	total: 3.81s	remaining: 3.07s
554:	learn: 0.2876476	total: 3.82s	remaining: 3.06s
555:	learn: 0.2875686	total: 3.83s	remaining: 3.06s
556:	learn: 0.2874862	total: 3.84s	remaining: 3.05s
557:	learn: 0.2874157	total: 3.85s	remaining: 3.05s
558:	learn: 0.2873172	total: 3.86s	remaining: 3.04s
559:	learn: 0.2872167	total: 3.87s	remaining: 3.04s
560:	learn: 0.2871140	total: 3.88s	remaining: 3.03s
561:	learn: 0.2869945	total: 3.89s	remaining: 3.03s
562:	learn: 0.2868732	total: 3.9s	remaining: 3.02s
563:	learn: 0.2867868	total: 3.91s	remaining: 3.02s
564:	learn: 0.2866821	total: 3.92s	remaining: 3.02s
565:	learn: 0.2865933	total: 3.92s	remaining: 3.01s
566:	learn: 0.2865053	total: 3.94s	remaining: 3s
567:	learn: 0.2864463	total: 3.94s	remaining: 3s
568:	learn: 0.2863378	total: 3.95s	remaining: 3s
569:	learn: 0.2862229	total: 3.96s	remaining: 2.99s
570:	learn: 0.2861036	total: 3.97s	remaining: 2.98s
571:	learn: 0.2860160	total: 3.98s	remaining: 2.98s
572:	learn: 0.2858844	total: 3.99s	remaining: 2.98s
573:	learn: 0.2858060	total: 4s	remaining: 2.97s
574:	learn: 0.2856835	total: 4.02s	remaining: 2.97s
575:	learn: 0.2855993	total: 4.03s	remaining: 2.97s
576:	learn: 0.2855265	total: 4.04s	remaining: 2.96s
577:	learn: 0.2854564	total: 4.05s	remaining: 2.96s
578:	learn: 0.2853940	total: 4.05s	remaining: 2.95s
579:	learn: 0.2852936	total: 4.06s	remaining: 2.94s
580:	learn: 0.2852307	total: 4.07s	remaining: 2.93s
581:	learn: 0.2851390	total: 4.07s	remaining: 2.92s
582:	learn: 0.2850113	total: 4.08s	remaining: 2.92s
583:	learn: 0.2849294	total: 4.08s	remaining: 2.91s
584:	learn: 0.2848589	total: 4.09s	remaining: 2.9s
585:	learn: 0.2847485	total: 4.09s	remaining: 2.89s
586:	learn: 0.2846705	total: 4.09s	remaining: 2.88s
587:	learn: 0.2845976	total: 4.1s	remaining: 2.87s
588:	learn: 0.2845416	total: 4.1s	remaining: 2.86s
589:	learn: 0.2844625	total: 4.11s	remaining: 2.85s
590:	learn: 0.2843851	total: 4.11s	remaining: 2.85s
591:	learn: 0.2843113	total: 4.12s	remaining: 2.84s
592:	learn: 0.2842058	total: 4.12s	remaining: 2.83s
593:	learn: 0.2841403	total: 4.13s	remaining: 2.82s
594:	learn: 0.2840540	total: 4.13s	remaining: 2.81s
595:	learn: 0.2839473	total: 4.14s	remaining: 2.8s
596:	learn: 0.2838765	total: 4.14s	remaining: 2.79s
597:	learn: 0.2837819	total: 4.14s	remaining: 2.79s
598:	learn: 0.2836960	total: 4.15s	remaining: 2.78s
599:	learn: 0.2836528	total: 4.15s	remaining: 2.77s
600:	learn: 0.2835407	total: 4.16s	remaining: 2.76s
601:	learn: 0.2834452	total: 4.16s	remaining: 2.75s
602:	learn: 0.2833838	total: 4.17s	remaining: 2.75s
603:	learn: 0.2832660	total: 4.18s	remaining: 2.74s
604:	learn: 0.2831515	total: 4.19s	remaining: 2.73s
605:	learn: 0.2830928	total: 4.2s	remaining: 2.73s
606:	learn: 0.2829314	total: 4.23s	remaining: 2.74s
607:	learn: 0.2828733	total: 4.24s	remaining: 2.73s
608:	learn: 0.2827808	total: 4.25s	remaining: 2.73s
609:	learn: 0.2827108	total: 4.25s	remaining: 2.72s
610:	learn: 0.2826603	total: 4.26s	remaining: 2.71s
611:	learn: 0.2825031	total: 4.26s	remaining: 2.7s
612:	learn: 0.2824529	total: 4.27s	remaining: 2.69s
613:	learn: 0.2823641	total: 4.27s	remaining: 2.69s
614:	learn: 0.2822964	total: 4.28s	remaining: 2.68s
615:	learn: 0.2822048	total: 4.28s	remaining: 2.67s
616:	learn: 0.2821105	total: 4.29s	remaining: 2.66s
617:	learn: 0.2820028	total: 4.29s	remaining: 2.65s
618:	learn: 0.2819367	total: 4.29s	remaining: 2.64s
619:	learn: 0.2818891	total: 4.3s	remaining: 2.63s
620:	learn: 0.2817211	total: 4.3s	remaining: 2.63s
621:	learn: 0.2816303	total: 4.31s	remaining: 2.62s
622:	learn: 0.2815774	total: 4.31s	remaining: 2.61s
623:	learn: 0.2814653	total: 4.31s	remaining: 2.6s
624:	learn: 0.2813823	total: 4.32s	remaining: 2.59s
625:	learn: 0.2813119	total: 4.32s	remaining: 2.58s
626:	learn: 0.2812364	total: 4.33s	remaining: 2.57s
627:	learn: 0.2811427	total: 4.33s	remaining: 2.56s
628:	learn: 0.2810593	total: 4.34s	remaining: 2.56s
629:	learn: 0.2809721	total: 4.34s	remaining: 2.55s
630:	learn: 0.2809046	total: 4.34s	remaining: 2.54s
631:	learn: 0.2808215	total: 4.35s	remaining: 2.53s
632:	learn: 0.2807199	total: 4.35s	remaining: 2.52s
633:	learn: 0.2806262	total: 4.36s	remaining: 2.51s
634:	learn: 0.2805548	total: 4.36s	remaining: 2.51s
635:	learn: 0.2804156	total: 4.36s	remaining: 2.5s
636:	learn: 0.2803183	total: 4.37s	remaining: 2.49s
637:	learn: 0.2802395	total: 4.37s	remaining: 2.48s
638:	learn: 0.2801385	total: 4.38s	remaining: 2.47s
639:	learn: 0.2800725	total: 4.38s	remaining: 2.46s
640:	learn: 0.2799708	total: 4.38s	remaining: 2.46s
641:	learn: 0.2798990	total: 4.39s	remaining: 2.45s
642:	learn: 0.2798233	total: 4.39s	remaining: 2.44s
643:	learn: 0.2797369	total: 4.4s	remaining: 2.43s
644:	learn: 0.2796633	total: 4.4s	remaining: 2.42s
645:	learn: 0.2795751	total: 4.41s	remaining: 2.41s
646:	learn: 0.2795282	total: 4.41s	remaining: 2.41s
647:	learn: 0.2794081	total: 4.41s	remaining: 2.4s
648:	learn: 0.2793437	total: 4.42s	remaining: 2.39s
649:	learn: 0.2792929	total: 4.42s	remaining: 2.38s
650:	learn: 0.2792069	total: 4.43s	remaining: 2.37s
651:	learn: 0.2790770	total: 4.43s	remaining: 2.37s
652:	learn: 0.2790302	total: 4.43s	remaining: 2.36s
653:	learn: 0.2789602	total: 4.44s	remaining: 2.35s
654:	learn: 0.2788745	total: 4.44s	remaining: 2.34s
655:	learn: 0.2787905	total: 4.45s	remaining: 2.33s
656:	learn: 0.2787113	total: 4.45s	remaining: 2.32s
657:	learn: 0.2786572	total: 4.46s	remaining: 2.32s
658:	learn: 0.2785915	total: 4.46s	remaining: 2.31s
659:	learn: 0.2784990	total: 4.46s	remaining: 2.3s
660:	learn: 0.2784227	total: 4.47s	remaining: 2.29s
661:	learn: 0.2783782	total: 4.47s	remaining: 2.28s
662:	learn: 0.2783016	total: 4.48s	remaining: 2.28s
663:	learn: 0.2781353	total: 4.48s	remaining: 2.27s
664:	learn: 0.2780473	total: 4.49s	remaining: 2.26s
665:	learn: 0.2779722	total: 4.49s	remaining: 2.25s
666:	learn: 0.2779149	total: 4.5s	remaining: 2.25s
667:	learn: 0.2777885	total: 4.5s	remaining: 2.24s
668:	learn: 0.2777324	total: 4.51s	remaining: 2.23s
669:	learn: 0.2776693	total: 4.51s	remaining: 2.22s
670:	learn: 0.2776066	total: 4.52s	remaining: 2.21s
671:	learn: 0.2774908	total: 4.52s	remaining: 2.21s
672:	learn: 0.2773998	total: 4.52s	remaining: 2.2s
673:	learn: 0.2772905	total: 4.53s	remaining: 2.19s
674:	learn: 0.2771788	total: 4.53s	remaining: 2.18s
675:	learn: 0.2771158	total: 4.54s	remaining: 2.17s
676:	learn: 0.2770078	total: 4.54s	remaining: 2.17s
677:	learn: 0.2769232	total: 4.55s	remaining: 2.16s
678:	learn: 0.2768194	total: 4.55s	remaining: 2.15s
679:	learn: 0.2766760	total: 4.56s	remaining: 2.15s
680:	learn: 0.2765857	total: 4.57s	remaining: 2.14s
681:	learn: 0.2765150	total: 4.57s	remaining: 2.13s
682:	learn: 0.2764351	total: 4.58s	remaining: 2.12s
683:	learn: 0.2762783	total: 4.58s	remaining: 2.12s
684:	learn: 0.2761259	total: 4.59s	remaining: 2.11s
685:	learn: 0.2760029	total: 4.59s	remaining: 2.1s
686:	learn: 0.2759465	total: 4.6s	remaining: 2.1s
687:	learn: 0.2758667	total: 4.6s	remaining: 2.09s
688:	learn: 0.2757980	total: 4.61s	remaining: 2.08s
689:	learn: 0.2756486	total: 4.61s	remaining: 2.07s
690:	learn: 0.2755316	total: 4.62s	remaining: 2.06s
691:	learn: 0.2754883	total: 4.62s	remaining: 2.06s
692:	learn: 0.2753833	total: 4.63s	remaining: 2.05s
693:	learn: 0.2752716	total: 4.63s	remaining: 2.04s
694:	learn: 0.2751810	total: 4.63s	remaining: 2.03s
695:	learn: 0.2751178	total: 4.64s	remaining: 2.03s
696:	learn: 0.2750040	total: 4.65s	remaining: 2.02s
697:	learn: 0.2748941	total: 4.65s	remaining: 2.01s
698:	learn: 0.2747493	total: 4.66s	remaining: 2s
699:	learn: 0.2746918	total: 4.66s	remaining: 2s
700:	learn: 0.2745983	total: 4.67s	remaining: 1.99s
701:	learn: 0.2745402	total: 4.67s	remaining: 1.98s
702:	learn: 0.2744257	total: 4.68s	remaining: 1.98s
703:	learn: 0.2742849	total: 4.68s	remaining: 1.97s
704:	learn: 0.2742074	total: 4.69s	remaining: 1.96s
705:	learn: 0.2740762	total: 4.69s	remaining: 1.95s
706:	learn: 0.2739973	total: 4.7s	remaining: 1.95s
707:	learn: 0.2739458	total: 4.7s	remaining: 1.94s
708:	learn: 0.2738750	total: 4.71s	remaining: 1.93s
709:	learn: 0.2737698	total: 4.71s	remaining: 1.92s
710:	learn: 0.2737096	total: 4.72s	remaining: 1.92s
711:	learn: 0.2736591	total: 4.72s	remaining: 1.91s
712:	learn: 0.2735751	total: 4.73s	remaining: 1.9s
713:	learn: 0.2734911	total: 4.73s	remaining: 1.9s
714:	learn: 0.2734593	total: 4.74s	remaining: 1.89s
715:	learn: 0.2734084	total: 4.74s	remaining: 1.88s
716:	learn: 0.2733372	total: 4.75s	remaining: 1.87s
717:	learn: 0.2732869	total: 4.75s	remaining: 1.87s
718:	learn: 0.2732282	total: 4.76s	remaining: 1.86s
719:	learn: 0.2731170	total: 4.76s	remaining: 1.85s
720:	learn: 0.2730153	total: 4.77s	remaining: 1.84s
721:	learn: 0.2729459	total: 4.77s	remaining: 1.84s
722:	learn: 0.2728210	total: 4.78s	remaining: 1.83s
723:	learn: 0.2727603	total: 4.78s	remaining: 1.82s
724:	learn: 0.2726774	total: 4.78s	remaining: 1.81s
725:	learn: 0.2726167	total: 4.79s	remaining: 1.81s
726:	learn: 0.2725376	total: 4.79s	remaining: 1.8s
727:	learn: 0.2724588	total: 4.8s	remaining: 1.79s
728:	learn: 0.2724048	total: 4.8s	remaining: 1.78s
729:	learn: 0.2723073	total: 4.8s	remaining: 1.78s
730:	learn: 0.2722154	total: 4.81s	remaining: 1.77s
731:	learn: 0.2721166	total: 4.81s	remaining: 1.76s
732:	learn: 0.2720204	total: 4.82s	remaining: 1.75s
733:	learn: 0.2719697	total: 4.82s	remaining: 1.75s
734:	learn: 0.2719039	total: 4.83s	remaining: 1.74s
735:	learn: 0.2718518	total: 4.83s	remaining: 1.73s
736:	learn: 0.2717846	total: 4.83s	remaining: 1.73s
737:	learn: 0.2717153	total: 4.84s	remaining: 1.72s
738:	learn: 0.2716464	total: 4.84s	remaining: 1.71s
739:	learn: 0.2715714	total: 4.85s	remaining: 1.7s
740:	learn: 0.2714827	total: 4.85s	remaining: 1.7s
741:	learn: 0.2714391	total: 4.85s	remaining: 1.69s
742:	learn: 0.2713569	total: 4.86s	remaining: 1.68s
743:	learn: 0.2713039	total: 4.86s	remaining: 1.67s
744:	learn: 0.2712420	total: 4.87s	remaining: 1.67s
745:	learn: 0.2711867	total: 4.87s	remaining: 1.66s
746:	learn: 0.2710927	total: 4.88s	remaining: 1.65s
747:	learn: 0.2710207	total: 4.88s	remaining: 1.64s
748:	learn: 0.2709368	total: 4.89s	remaining: 1.64s
749:	learn: 0.2708850	total: 4.89s	remaining: 1.63s
750:	learn: 0.2708343	total: 4.89s	remaining: 1.62s
751:	learn: 0.2707758	total: 4.9s	remaining: 1.61s
752:	learn: 0.2706771	total: 4.9s	remaining: 1.61s
753:	learn: 0.2705711	total: 4.91s	remaining: 1.6s
754:	learn: 0.2705258	total: 4.91s	remaining: 1.59s
755:	learn: 0.2704630	total: 4.92s	remaining: 1.59s
756:	learn: 0.2703908	total: 4.92s	remaining: 1.58s
757:	learn: 0.2703432	total: 4.93s	remaining: 1.57s
758:	learn: 0.2702577	total: 4.94s	remaining: 1.57s
759:	learn: 0.2701822	total: 4.94s	remaining: 1.56s
760:	learn: 0.2700855	total: 4.95s	remaining: 1.55s
761:	learn: 0.2700212	total: 4.96s	remaining: 1.55s
762:	learn: 0.2699146	total: 4.96s	remaining: 1.54s
763:	learn: 0.2698429	total: 4.96s	remaining: 1.53s
764:	learn: 0.2697872	total: 4.97s	remaining: 1.53s
765:	learn: 0.2697283	total: 4.97s	remaining: 1.52s
766:	learn: 0.2696715	total: 4.98s	remaining: 1.51s
767:	learn: 0.2696016	total: 4.98s	remaining: 1.5s
768:	learn: 0.2695397	total: 4.99s	remaining: 1.5s
769:	learn: 0.2694625	total: 4.99s	remaining: 1.49s
770:	learn: 0.2693965	total: 5s	remaining: 1.49s
771:	learn: 0.2692862	total: 5.01s	remaining: 1.48s
772:	learn: 0.2692184	total: 5.01s	remaining: 1.47s
773:	learn: 0.2691259	total: 5.02s	remaining: 1.47s
774:	learn: 0.2690679	total: 5.02s	remaining: 1.46s
775:	learn: 0.2689816	total: 5.03s	remaining: 1.45s
776:	learn: 0.2688540	total: 5.03s	remaining: 1.44s
777:	learn: 0.2687935	total: 5.04s	remaining: 1.44s
778:	learn: 0.2687387	total: 5.04s	remaining: 1.43s
779:	learn: 0.2686777	total: 5.05s	remaining: 1.42s
780:	learn: 0.2686343	total: 5.05s	remaining: 1.42s
781:	learn: 0.2685354	total: 5.06s	remaining: 1.41s
782:	learn: 0.2684615	total: 5.06s	remaining: 1.4s
783:	learn: 0.2684339	total: 5.07s	remaining: 1.4s
784:	learn: 0.2683235	total: 5.07s	remaining: 1.39s
785:	learn: 0.2681924	total: 5.08s	remaining: 1.38s
786:	learn: 0.2681297	total: 5.08s	remaining: 1.37s
787:	learn: 0.2680782	total: 5.08s	remaining: 1.37s
788:	learn: 0.2679827	total: 5.09s	remaining: 1.36s
789:	learn: 0.2679193	total: 5.09s	remaining: 1.35s
790:	learn: 0.2677850	total: 5.1s	remaining: 1.35s
791:	learn: 0.2677466	total: 5.1s	remaining: 1.34s
792:	learn: 0.2676377	total: 5.11s	remaining: 1.33s
793:	learn: 0.2675598	total: 5.11s	remaining: 1.33s
794:	learn: 0.2675010	total: 5.12s	remaining: 1.32s
795:	learn: 0.2673858	total: 5.12s	remaining: 1.31s
796:	learn: 0.2673166	total: 5.12s	remaining: 1.3s
797:	learn: 0.2672279	total: 5.13s	remaining: 1.3s
798:	learn: 0.2671700	total: 5.13s	remaining: 1.29s
799:	learn: 0.2671118	total: 5.14s	remaining: 1.28s
800:	learn: 0.2670035	total: 5.14s	remaining: 1.28s
801:	learn: 0.2669099	total: 5.14s	remaining: 1.27s
802:	learn: 0.2668207	total: 5.15s	remaining: 1.26s
803:	learn: 0.2667587	total: 5.15s	remaining: 1.26s
804:	learn: 0.2666636	total: 5.16s	remaining: 1.25s
805:	learn: 0.2665931	total: 5.16s	remaining: 1.24s
806:	learn: 0.2665252	total: 5.17s	remaining: 1.24s
807:	learn: 0.2664580	total: 5.17s	remaining: 1.23s
808:	learn: 0.2663930	total: 5.18s	remaining: 1.22s
809:	learn: 0.2663516	total: 5.18s	remaining: 1.22s
810:	learn: 0.2662270	total: 5.18s	remaining: 1.21s
811:	learn: 0.2661684	total: 5.19s	remaining: 1.2s
812:	learn: 0.2660630	total: 5.19s	remaining: 1.19s
813:	learn: 0.2660130	total: 5.2s	remaining: 1.19s
814:	learn: 0.2659534	total: 5.2s	remaining: 1.18s
815:	learn: 0.2658742	total: 5.21s	remaining: 1.17s
816:	learn: 0.2658373	total: 5.21s	remaining: 1.17s
817:	learn: 0.2657818	total: 5.22s	remaining: 1.16s
818:	learn: 0.2657000	total: 5.22s	remaining: 1.15s
819:	learn: 0.2656448	total: 5.23s	remaining: 1.15s
820:	learn: 0.2655612	total: 5.23s	remaining: 1.14s
821:	learn: 0.2654823	total: 5.24s	remaining: 1.13s
822:	learn: 0.2654130	total: 5.25s	remaining: 1.13s
823:	learn: 0.2653157	total: 5.25s	remaining: 1.12s
824:	learn: 0.2652391	total: 5.25s	remaining: 1.11s
825:	learn: 0.2651645	total: 5.26s	remaining: 1.11s
826:	learn: 0.2650606	total: 5.27s	remaining: 1.1s
827:	learn: 0.2649992	total: 5.27s	remaining: 1.09s
828:	learn: 0.2649671	total: 5.28s	remaining: 1.09s
829:	learn: 0.2649114	total: 5.28s	remaining: 1.08s
830:	learn: 0.2648525	total: 5.29s	remaining: 1.07s
831:	learn: 0.2647070	total: 5.29s	remaining: 1.07s
832:	learn: 0.2645870	total: 5.3s	remaining: 1.06s
833:	learn: 0.2645407	total: 5.3s	remaining: 1.05s
834:	learn: 0.2644719	total: 5.3s	remaining: 1.05s
835:	learn: 0.2644201	total: 5.31s	remaining: 1.04s
836:	learn: 0.2643622	total: 5.31s	remaining: 1.03s
837:	learn: 0.2643065	total: 5.32s	remaining: 1.03s
838:	learn: 0.2642099	total: 5.32s	remaining: 1.02s
839:	learn: 0.2641582	total: 5.33s	remaining: 1.01s
840:	learn: 0.2640918	total: 5.33s	remaining: 1.01s
841:	learn: 0.2640410	total: 5.34s	remaining: 1s
842:	learn: 0.2639768	total: 5.34s	remaining: 995ms
843:	learn: 0.2638768	total: 5.35s	remaining: 988ms
844:	learn: 0.2637406	total: 5.35s	remaining: 982ms
845:	learn: 0.2636876	total: 5.36s	remaining: 975ms
846:	learn: 0.2636275	total: 5.36s	remaining: 968ms
847:	learn: 0.2635580	total: 5.36s	remaining: 961ms
848:	learn: 0.2634902	total: 5.37s	remaining: 955ms
849:	learn: 0.2633956	total: 5.37s	remaining: 948ms
850:	learn: 0.2632845	total: 5.38s	remaining: 941ms
851:	learn: 0.2632414	total: 5.38s	remaining: 934ms
852:	learn: 0.2631551	total: 5.38s	remaining: 928ms
853:	learn: 0.2630801	total: 5.39s	remaining: 921ms
854:	learn: 0.2630007	total: 5.39s	remaining: 914ms
855:	learn: 0.2629505	total: 5.39s	remaining: 908ms
856:	learn: 0.2628751	total: 5.4s	remaining: 901ms
857:	learn: 0.2627822	total: 5.4s	remaining: 894ms
858:	learn: 0.2626723	total: 5.41s	remaining: 888ms
859:	learn: 0.2625537	total: 5.41s	remaining: 881ms
860:	learn: 0.2624292	total: 5.42s	remaining: 875ms
861:	learn: 0.2623599	total: 5.42s	remaining: 868ms
862:	learn: 0.2622783	total: 5.43s	remaining: 861ms
863:	learn: 0.2622138	total: 5.43s	remaining: 855ms
864:	learn: 0.2621490	total: 5.43s	remaining: 848ms
865:	learn: 0.2620780	total: 5.44s	remaining: 842ms
866:	learn: 0.2620261	total: 5.44s	remaining: 835ms
867:	learn: 0.2619579	total: 5.45s	remaining: 829ms
868:	learn: 0.2618905	total: 5.45s	remaining: 822ms
869:	learn: 0.2618513	total: 5.46s	remaining: 815ms
870:	learn: 0.2618037	total: 5.46s	remaining: 809ms
871:	learn: 0.2617097	total: 5.46s	remaining: 802ms
872:	learn: 0.2616788	total: 5.47s	remaining: 796ms
873:	learn: 0.2616112	total: 5.47s	remaining: 789ms
874:	learn: 0.2615679	total: 5.48s	remaining: 783ms
875:	learn: 0.2614451	total: 5.48s	remaining: 776ms
876:	learn: 0.2613943	total: 5.49s	remaining: 770ms
877:	learn: 0.2613394	total: 5.49s	remaining: 763ms
878:	learn: 0.2612693	total: 5.5s	remaining: 757ms
879:	learn: 0.2612308	total: 5.5s	remaining: 750ms
880:	learn: 0.2611888	total: 5.5s	remaining: 744ms
881:	learn: 0.2610938	total: 5.51s	remaining: 737ms
882:	learn: 0.2610133	total: 5.51s	remaining: 731ms
883:	learn: 0.2609138	total: 5.52s	remaining: 724ms
884:	learn: 0.2608485	total: 5.52s	remaining: 718ms
885:	learn: 0.2607640	total: 5.53s	remaining: 711ms
886:	learn: 0.2607019	total: 5.53s	remaining: 705ms
887:	learn: 0.2606569	total: 5.54s	remaining: 698ms
888:	learn: 0.2605785	total: 5.54s	remaining: 692ms
889:	learn: 0.2605225	total: 5.54s	remaining: 685ms
890:	learn: 0.2604375	total: 5.55s	remaining: 679ms
891:	learn: 0.2604081	total: 5.55s	remaining: 673ms
892:	learn: 0.2603203	total: 5.56s	remaining: 666ms
893:	learn: 0.2602621	total: 5.56s	remaining: 660ms
894:	learn: 0.2601646	total: 5.57s	remaining: 653ms
895:	learn: 0.2600754	total: 5.57s	remaining: 647ms
896:	learn: 0.2600146	total: 5.58s	remaining: 640ms
897:	learn: 0.2599581	total: 5.58s	remaining: 634ms
898:	learn: 0.2598881	total: 5.58s	remaining: 627ms
899:	learn: 0.2598400	total: 5.59s	remaining: 621ms
900:	learn: 0.2597987	total: 5.59s	remaining: 615ms
901:	learn: 0.2597250	total: 5.6s	remaining: 608ms
902:	learn: 0.2596734	total: 5.6s	remaining: 602ms
903:	learn: 0.2596428	total: 5.61s	remaining: 596ms
904:	learn: 0.2595920	total: 5.61s	remaining: 589ms
905:	learn: 0.2595029	total: 5.62s	remaining: 583ms
906:	learn: 0.2594493	total: 5.62s	remaining: 576ms
907:	learn: 0.2593817	total: 5.63s	remaining: 570ms
908:	learn: 0.2593057	total: 5.63s	remaining: 564ms
909:	learn: 0.2592245	total: 5.63s	remaining: 557ms
910:	learn: 0.2591156	total: 5.64s	remaining: 551ms
911:	learn: 0.2590409	total: 5.64s	remaining: 544ms
912:	learn: 0.2589414	total: 5.64s	remaining: 538ms
913:	learn: 0.2588972	total: 5.65s	remaining: 532ms
914:	learn: 0.2588504	total: 5.65s	remaining: 525ms
915:	learn: 0.2587540	total: 5.66s	remaining: 519ms
916:	learn: 0.2587154	total: 5.66s	remaining: 512ms
917:	learn: 0.2586561	total: 5.66s	remaining: 506ms
918:	learn: 0.2585237	total: 5.67s	remaining: 500ms
919:	learn: 0.2584533	total: 5.67s	remaining: 493ms
920:	learn: 0.2583551	total: 5.68s	remaining: 487ms
921:	learn: 0.2582825	total: 5.68s	remaining: 481ms
922:	learn: 0.2582125	total: 5.68s	remaining: 474ms
923:	learn: 0.2581555	total: 5.69s	remaining: 468ms
924:	learn: 0.2580666	total: 5.69s	remaining: 461ms
925:	learn: 0.2580023	total: 5.69s	remaining: 455ms
926:	learn: 0.2579397	total: 5.7s	remaining: 449ms
927:	learn: 0.2578873	total: 5.7s	remaining: 442ms
928:	learn: 0.2577930	total: 5.7s	remaining: 436ms
929:	learn: 0.2577023	total: 5.71s	remaining: 429ms
930:	learn: 0.2576065	total: 5.71s	remaining: 423ms
931:	learn: 0.2575277	total: 5.71s	remaining: 417ms
932:	learn: 0.2574777	total: 5.71s	remaining: 410ms
933:	learn: 0.2574126	total: 5.72s	remaining: 404ms
934:	learn: 0.2573502	total: 5.72s	remaining: 398ms
935:	learn: 0.2573180	total: 5.72s	remaining: 391ms
936:	learn: 0.2572575	total: 5.73s	remaining: 385ms
937:	learn: 0.2571548	total: 5.73s	remaining: 379ms
938:	learn: 0.2571074	total: 5.73s	remaining: 372ms
939:	learn: 0.2570166	total: 5.74s	remaining: 366ms
940:	learn: 0.2569184	total: 5.74s	remaining: 360ms
941:	learn: 0.2568613	total: 5.74s	remaining: 354ms
942:	learn: 0.2567470	total: 5.75s	remaining: 347ms
943:	learn: 0.2566585	total: 5.75s	remaining: 341ms
944:	learn: 0.2565452	total: 5.76s	remaining: 335ms
945:	learn: 0.2564959	total: 5.76s	remaining: 329ms
946:	learn: 0.2564463	total: 5.76s	remaining: 323ms
947:	learn: 0.2563467	total: 5.77s	remaining: 316ms
948:	learn: 0.2561945	total: 5.77s	remaining: 310ms
949:	learn: 0.2561240	total: 5.78s	remaining: 304ms
950:	learn: 0.2560678	total: 5.78s	remaining: 298ms
951:	learn: 0.2560372	total: 5.78s	remaining: 292ms
952:	learn: 0.2560042	total: 5.79s	remaining: 285ms
953:	learn: 0.2559674	total: 5.79s	remaining: 279ms
954:	learn: 0.2558679	total: 5.79s	remaining: 273ms
955:	learn: 0.2558129	total: 5.8s	remaining: 267ms
956:	learn: 0.2557581	total: 5.8s	remaining: 261ms
957:	learn: 0.2556811	total: 5.81s	remaining: 255ms
958:	learn: 0.2555795	total: 5.81s	remaining: 248ms
959:	learn: 0.2555395	total: 5.81s	remaining: 242ms
960:	learn: 0.2554608	total: 5.82s	remaining: 236ms
961:	learn: 0.2553999	total: 5.82s	remaining: 230ms
962:	learn: 0.2552898	total: 5.83s	remaining: 224ms
963:	learn: 0.2552292	total: 5.83s	remaining: 218ms
964:	learn: 0.2551757	total: 5.83s	remaining: 212ms
965:	learn: 0.2551375	total: 5.84s	remaining: 205ms
966:	learn: 0.2550936	total: 5.84s	remaining: 199ms
967:	learn: 0.2550500	total: 5.84s	remaining: 193ms
968:	learn: 0.2550072	total: 5.85s	remaining: 187ms
969:	learn: 0.2549374	total: 5.85s	remaining: 181ms
970:	learn: 0.2548819	total: 5.85s	remaining: 175ms
971:	learn: 0.2548162	total: 5.86s	remaining: 169ms
972:	learn: 0.2547802	total: 5.86s	remaining: 163ms
973:	learn: 0.2547170	total: 5.86s	remaining: 157ms
974:	learn: 0.2546422	total: 5.87s	remaining: 150ms
975:	learn: 0.2545924	total: 5.87s	remaining: 144ms
976:	learn: 0.2545131	total: 5.87s	remaining: 138ms
977:	learn: 0.2544432	total: 5.88s	remaining: 132ms
978:	learn: 0.2543650	total: 5.88s	remaining: 126ms
979:	learn: 0.2543077	total: 5.88s	remaining: 120ms
980:	learn: 0.2542234	total: 5.88s	remaining: 114ms
981:	learn: 0.2541606	total: 5.89s	remaining: 108ms
982:	learn: 0.2540974	total: 5.89s	remaining: 102ms
983:	learn: 0.2540196	total: 5.89s	remaining: 95.9ms
984:	learn: 0.2539876	total: 5.9s	remaining: 89.8ms
985:	learn: 0.2539653	total: 5.9s	remaining: 83.8ms
986:	learn: 0.2539087	total: 5.9s	remaining: 77.8ms
987:	learn: 0.2538602	total: 5.91s	remaining: 71.8ms
988:	learn: 0.2538077	total: 5.91s	remaining: 65.7ms
989:	learn: 0.2537299	total: 5.91s	remaining: 59.7ms
990:	learn: 0.2536897	total: 5.92s	remaining: 53.7ms
991:	learn: 0.2536434	total: 5.92s	remaining: 47.8ms
992:	learn: 0.2535728	total: 5.92s	remaining: 41.8ms
993:	learn: 0.2534649	total: 5.93s	remaining: 35.8ms
994:	learn: 0.2534253	total: 5.93s	remaining: 29.8ms
995:	learn: 0.2533765	total: 5.93s	remaining: 23.8ms
996:	learn: 0.2533113	total: 5.94s	remaining: 17.9ms
997:	learn: 0.2532057	total: 5.94s	remaining: 11.9ms
998:	learn: 0.2531779	total: 5.95s	remaining: 5.95ms
999:	learn: 0.2531354	total: 5.95s	remaining: 0us
In [184]:
y_pred = cat_model.predict(x_test)
catb_final_score =(accuracy_score(y_test,y_pred)*100)
catb_final_score 
Out[184]:
86.96666666666667
In [185]:
catb_cm = confusion_matrix(y_test,y_pred)
catb_cm
Out[185]:
array([[2319,   97],
       [ 294,  290]], dtype=int64)
Standart Scaler Effect
In [186]:
## We will use also scaler for improving the score of ML algorithms
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(x)
x_scaled = scaler.transform(x)
In [187]:
x_train,x_test,y_train,y_test = train_test_split(x_scaled,y,
                                                test_size = 0.30,
                                                random_state = 42)
KNN
In [188]:
knn_params = {"n_neighbors":np.arange(1,50),
              "weights": ["uniform","distance"],
              "metric":["euclidean","manhattan"]}

knn =KNeighborsClassifier()
knn_cv = GridSearchCV(knn,knn_params,cv = 5)
knn_cv = knn_cv.fit(x_train,y_train)
print("Best Parameters:"+str(knn_cv.best_params_))
Best Parameters:{'metric': 'manhattan', 'n_neighbors': 11, 'weights': 'uniform'}
In [189]:
knn_scaled =KNeighborsClassifier(n_neighbors =29,metric='manhattan',weights='distance')
knn_scaled = knn_scaled.fit(x_train,y_train)
y_pred = knn_scaled.predict(x_test)
knn_sscore = (accuracy_score(y_test,y_pred)*100)
knn_sscore
Out[189]:
84.46666666666667
Confusion Matrix
In [190]:
knn_scaled_conf = confusion_matrix(y_test,y_pred)
knn_scaled_conf
Out[190]:
array([[2320,   96],
       [ 370,  214]], dtype=int64)
Support Vector Machines
In [192]:
svm_scaled_linear = SVC(kernel='linear').fit(x_train,y_train)
svm_scaled_poly = SVC(kernel='poly').fit(x_train,y_train)
svm_scaled_rbf = SVC(kernel='rbf').fit(x_train,y_train)
In [193]:
y_pred_slinear = svm_scaled_linear.predict(x_test)
y_pred_spoly = svm_scaled_poly.predict(x_test)
y_pred_srbf = svm_scaled_rbf.predict(x_test)

print(accuracy_score(y_test,y_pred_slinear))
print(accuracy_score(y_test,y_pred_spoly))
print(accuracy_score(y_test,y_pred_srbf))
0.8186666666666667
0.8623333333333333
0.862
In [194]:
svc_params = {"C": [10,50,100,500,700],
              'kernel':['poly','rbf'],
              "gamma": [0.001, 0.01, 0.1]} 
                 
svc = SVC()
svc_cv_model = GridSearchCV(svc,svc_params,
                            cv = 5,
                           n_jobs = -1,
                           verbose = 2)
svc_cv_model.fit(x_train,y_train)
print("Best Parameters:"+str(svc_cv_model.best_params_))
Fitting 5 folds for each of 30 candidates, totalling 150 fits
[Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.
[Parallel(n_jobs=-1)]: Done  25 tasks      | elapsed:   10.3s
[Parallel(n_jobs=-1)]: Done 150 out of 150 | elapsed: 27.6min finished
Best Parameters:{'C': 700, 'gamma': 0.01, 'kernel': 'rbf'}
In [195]:
svc_scaled = SVC(kernel = 'rbf',C = 700, gamma = 0.01)
scaled = svc_scaled.fit(x_train,y_train)
y_pred = scaled.predict(x_test)
In [196]:
svm_scaled_score = (accuracy_score(y_test,y_pred)*100)
svm_scaled_score
Out[196]:
86.8
Confusion Matrix
In [197]:
svc_scaled_conf = confusion_matrix(y_test,y_pred)
svc_scaled_conf
Out[197]:
array([[2317,   99],
       [ 297,  287]], dtype=int64)
Random Forest
In [198]:
rf_params  = {'max_depth':list(range(1,11)),
             "max_features":["log2","auto","sqrt"],
             "n_estimators":[2,10,20,50,150,300],
             'criterion' : ['gini','entropy'],
             'min_samples_leaf' : [1,3,5,10]}
In [199]:
rf_model = RandomForestClassifier(random_state = 42)
rf_cv_model = GridSearchCV(rf_model,
                           rf_params,
                           cv = 5,
                           n_jobs = -1)
rf_cv_model.fit(x_train,y_train)
rf_cv_model.best_params_
Out[199]:
{'criterion': 'gini',
 'max_depth': 10,
 'max_features': 'log2',
 'min_samples_leaf': 1,
 'n_estimators': 300}
In [200]:
rf_tuned = RandomForestClassifier(max_depth = 10,
                                  criterion = 'gini',
                                  max_features = 'log2',
                                  min_samples_leaf = 1,
                                  n_estimators = 150,random_state = 42)
rf_tuned = rf_tuned.fit(x_train,y_train)
y_pred  = rf_tuned.predict(x_test)
rf_scaled_score = (accuracy_score(y_test,y_pred)*100)
rf_scaled_score
Out[200]:
86.7
Confusion Matrix
In [201]:
rf_scaled_conf = confusion_matrix(y_test,y_pred)
rf_scaled_conf
Out[201]:
array([[2347,   69],
       [ 330,  254]], dtype=int64)
LightGBM
In [202]:
lgbm_params = {"learning_rate" : [0.01, 0.02,0.1],
             "n_estimators": [100,200,300,500,1000],
             "max_depth": [2,3,5,7],
             "min_child_samples": [1,2,5,10]}
lgbm = LGBMClassifier()
lgbm_cv = GridSearchCV(lgbm,lgbm_params,verbose=0,n_jobs=-1,cv=5)
lgbm_cv_model = lgbm_cv.fit(x_train,y_train)
lgbm_cv_model.best_params_
Out[202]:
{'learning_rate': 0.02,
 'max_depth': 2,
 'min_child_samples': 2,
 'n_estimators': 1000}
In [203]:
lgbm = LGBMClassifier(learning_rate=0.02,
                      max_depth=5,min_child_samples=5,
                      n_estimators=500,random_state = 42)
lgbm_tuned = lgbm.fit(x_train,y_train)
y_pred = lgbm_tuned.predict(x_test)
lgbm_scaled_acc = (accuracy_score(y_test,y_pred)*100)
lgbm_scaled_acc
Out[203]:
86.9
Confusion Matrix
In [204]:
lgbm_scaled_conf = confusion_matrix(y_test,y_pred)
lgbm_scaled_conf
Out[204]:
array([[2325,   91],
       [ 302,  282]], dtype=int64)

Logistic Regression ROC Curve

In [ ]:
logit_roc_auc = roc_auc_score(y_test,log_reg_tuned.predict(x_test))

fpr, tpr, tresholds = roc_curve(y_test,log_reg_tuned.predict_proba(x_test)[:,1])
plt.figure(figsize=(6,6))
plt.plot(fpr,tpr,label = "AUC (area = %0.2f)"%logit_roc_auc)
plt.plot([0,1],[0,1],"r--")
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
plt.xlabel("False Positive Ratio")
plt.ylabel("True Positive Ratio")
plt.title('ROC Curve');
In [205]:
from sklearn.metrics import roc_auc_score
roc_auc_score(y_test,y_probs)
Out[205]:
0.7926055180078019
  • As we can see our Roc curve is not much close to left top corner. This means our prediction score is a bit lover than expected.

Models Comparison

Confusion Matrix Comparison
In [206]:
fig = plt.figure(figsize=(15,15))

ax1 = fig.add_subplot(4, 4, 1) # row, column, position
ax1.set_title('Logistic Regression Classification')

ax2 = fig.add_subplot(4, 4, 2) # row, column, position
ax2.set_title('KNN Classification')

ax3 = fig.add_subplot(4, 4, 3)
ax3.set_title('SVM Classification')

ax4 = fig.add_subplot(4, 4, 4)
ax4.set_title('Naive Bayes Classification')

ax5 = fig.add_subplot(4, 4, 5)
ax5.set_title('Random Forest Classification')

ax6 = fig.add_subplot(4, 4, 6)
ax6.set_title('GBM Classification')

ax7 = fig.add_subplot(4, 4, 7)
ax7.set_title('LightGBM Classification')

ax8 = fig.add_subplot(4, 4, 8)
ax8.set_title('XGBoost Classification')

ax9 = fig.add_subplot(4, 4, 9)
ax9.set_title('CatBoost Classification')

ax10 = fig.add_subplot(4, 4, 10)
ax10.set_title('KNN Scaled Classification')

ax11 = fig.add_subplot(4,4, 11)
ax11.set_title('SVC Scaled Classification')

ax12 = fig.add_subplot(4,4, 12)
ax12.set_title('Random Forest Scaled Classification')

ax13 = fig.add_subplot(4, 4, 13)
ax13.set_title('LightGBM Scaled Classification')


sns.heatmap(data=lr_cm, annot=True, linewidth=0.7, linecolor='cyan', fmt='.0f', ax=ax1, cmap='magma')
sns.heatmap(data=knn_cm, annot=True, linewidth=0.7, linecolor='cyan', fmt='.0f', ax=ax2, cmap='magma')   
sns.heatmap(data=svm_cm, annot=True, linewidth=0.7, linecolor='cyan', fmt='.0f', ax=ax3, cmap='magma')
sns.heatmap(data=nb_cm, annot=True, linewidth=0.7, linecolor='cyan', fmt='.0f', ax=ax4, cmap='magma')
sns.heatmap(data=rf_cm, annot=True, linewidth=0.7, linecolor='cyan', fmt='.0f', ax=ax5, cmap='magma')
sns.heatmap(data=gbm_cm, annot=True, linewidth=0.7, linecolor='cyan', fmt='.0f', ax=ax6, cmap='magma')
sns.heatmap(data=lgbm_cm, annot=True, linewidth=0.7, linecolor='cyan', fmt='.0f', ax=ax7, cmap='magma')
sns.heatmap(data=xgbm_cm, annot=True, linewidth=0.7, linecolor='cyan', fmt='.0f', ax=ax8, cmap='magma')
sns.heatmap(data=catb_cm, annot=True, linewidth=0.7, linecolor='cyan', fmt='.0f', ax=ax9, cmap='magma')
sns.heatmap(data=knn_scaled_conf, annot=True, linewidth=0.7, linecolor='cyan', fmt='.0f', ax=ax10, cmap='magma')
sns.heatmap(data=svc_scaled_conf, annot=True, linewidth=0.7, linecolor='cyan', fmt='.0f', ax=ax11, cmap='magma')
sns.heatmap(data=rf_scaled_conf, annot=True, linewidth=0.7, linecolor='cyan', fmt='.0f', ax=ax12, cmap='magma')
sns.heatmap(data=lgbm_scaled_conf, annot=True, linewidth=0.7, linecolor='cyan', fmt='.0f', ax=ax13, cmap='magma')
plt.show()

Model Score Comparison

In [207]:
indexx = ["Log","RF","KNN","SVM","NB","GBM","LightGBM","XGBoost",'CatBoost',"KNN Scaled","SVM Scaled", 'RF Scaled',"LightGBM Scaled"]
regressions = [log_tuned_score,rf_tuned_score,knn_tuned,svc_rbf_score,nb_tuned,gbm_tuned_score,
               lgbm_tuned_acc,xgbm_score,catb_final_score,knn_sscore,svm_scaled_score,rf_scaled_score,lgbm_scaled_acc]

plt.figure(figsize=(12,8))
sns.barplot(x=indexx,y=regressions)
plt.xticks(rotation=45)
plt.title('Model Comparision',color = 'green',fontsize=20);
In [208]:
pie_list=regressions
labels=list(zip(indexx,regressions))
fig={
    "data":[
        {
            "values":pie_list,
            "labels":labels,
            "domain": {"x": [.2, 1]},
            "name": "Models-Accuracy Score",
            "hoverinfo":"label+percent+name",
            "hole": .4,
            "type": "pie"
        },],
    "layout":{
        "title":"Accuracy Scores",
        "annotations":[
            {
                "font":{"size":20},
                "showarrow": False,
                "text": "Model Scores",
                "x": 0.60,
                "y": 0.50
            },
        ]
    }  
}
iplot(fig)
In [ ]:
 
In [ ]:
Conclusion
LightGBM Boost model has the higest accuracy rate (87.1)
Cat Boost model is really successfull at catching 'true positives' of conf matrix =>>>(2356,73)(307,264)
GBM model is succesfull at catching 'false positives' conf matrix ==> (2339, 90) (297, 274)
Model tuning made quite improvement in all models.
We can experienced different scores for some models with normalization and standart scaler.
The final model is depends if we aiming catching exited members or all members(true-positive/false-positive).
In [ ]: